/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>

static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
								   uint64_t offset,
								   uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
						     unsigned alignment,
						     bool map_and_fenceable);
static void i915_gem_clear_fence_reg(struct drm_device *dev,
				     struct drm_i915_fence_reg *reg);
static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file);
static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);

static int i915_gem_inactive_shrink(struct shrinker *shrinker,
				    struct shrink_control *sc);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
}

static int
i915_gem_wait_for_error(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct completion *x = &dev_priv->error_completion;
	unsigned long flags;
	int ret;

	if (!atomic_read(&dev_priv->mm.wedged))
		return 0;

	ret = wait_for_completion_interruptible(x);
	if (ret)
		return ret;

	if (atomic_read(&dev_priv->mm.wedged)) {
		/* GPU is hung, bump the completion count to account for
		 * the token we just consumed so that we never hit zero and
		 * end up waiting upon a subsequent completion event that
		 * will never happen.
		 */
		spin_lock_irqsave(&x->wait.lock, flags);
		x->done++;
		spin_unlock_irqrestore(&x->wait.lock, flags);
	}
	return 0;
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	int ret;

	ret = i915_gem_wait_for_error(dev);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
	return obj->gtt_space && !obj->active && obj->pin_count == 0;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_init *args = data;

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
		return -EINVAL;

	mutex_lock(&dev->struct_mutex);
	i915_gem_init_global_gtt(dev, args->gtt_start,
				 args->gtt_end, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj;
	size_t pinned;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
		pinned += obj->gtt_space->size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->mm.gtt_total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base);
		i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
		kfree(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference(&obj->base);
	trace_i915_gem_object_create(obj);

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

int i915_gem_dumb_destroy(struct drm_file *file,
			  struct drm_device *dev,
			  uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj->tiling_mode != I915_TILING_NONE;
}

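/* Copy helpers for objects that use bit-17-dependent swizzling: within
 * each 128-byte span the two 64-byte cache lines are swapped, so the
 * copy below walks the buffer one cache line at a time and XORs the
 * GPU-side offset with 64 to address the swizzled location.
 */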
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char __user *gpu_vaddr, int gpu_offset,
			  const char *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	offset = args->offset;

	while (remain > 0) {
		struct page *page;
		char *vaddr;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

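		/* Fast path: try a non-faulting atomic copy while still
		 * holding struct_mutex; fall back to the sleeping kmap
		 * path below, with the mutex dropped, if the copy faults
		 * or the page needs bit-17 swizzling.
		 */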
		if (!page_do_bit17_swizzling) {
			vaddr = kmap_atomic(page);
			ret = __copy_to_user_inatomic(user_data,
						      vaddr + shmem_page_offset,
						      page_length);
			kunmap_atomic(vaddr);
			if (ret == 0)
				goto next_page;
		}

		hit_slowpath = 1;

		mutex_unlock(&dev->struct_mutex);

		vaddr = kmap(page);
		if (page_do_bit17_swizzling)
			ret = __copy_to_user_swizzled(user_data,
						      vaddr, shmem_page_offset,
						      page_length);
		else
			ret = __copy_to_user(user_data,
					     vaddr + shmem_page_offset,
					     page_length);
		kunmap(page);

		mutex_lock(&dev->struct_mutex);
next_page:
		mark_page_accessed(page);
		page_cache_release(page);

		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	if (hit_slowpath) {
		/* Fixup: Kill any reinstated backing storage pages */
		if (obj->madv == __I915_MADV_PURGED)
			i915_gem_object_truncate(obj);
	}

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size))
		return -EFAULT;

	ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
				       args->size);
	if (ret)
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_object_set_cpu_read_domain_range(obj,
							args->offset,
							args->size);
	if (ret)
		goto out;

	ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/* Here's the write path which can sleep for
 * page faults
 */

static inline void
slow_kernel_write(struct io_mapping *mapping,
		  loff_t gtt_base, int gtt_offset,
		  struct page *user_page, int user_offset,
		  int length)
{
	char __iomem *dst_vaddr;
	char *src_vaddr;

	dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
	src_vaddr = kmap(user_page);

	memcpy_toio(dst_vaddr + gtt_offset,
		    src_vaddr + user_offset,
		    length);

	kunmap(user_page);
	io_mapping_unmap(dst_vaddr);
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	offset = obj->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
				    page_offset, user_data, page_length))
			return -EFAULT;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	return 0;
}

/**
 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
 */
static int
i915_gem_gtt_pwrite_slow(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t gtt_page_base, offset;
	loff_t first_data_page, last_data_page, num_pages;
	loff_t pinned_pages, i;
	struct page **user_pages;
	struct mm_struct *mm = current->mm;
	int gtt_page_offset, data_page_offset, data_page_index, page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	mutex_unlock(&dev->struct_mutex);
	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out_unpin_pages;
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin_pages;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin_pages;

	offset = obj->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * gtt_page_base = page offset within aperture
		 * gtt_page_offset = offset within page in aperture
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		gtt_page_base = offset & PAGE_MASK;
		gtt_page_offset = offset_in_page(offset);
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = offset_in_page(data_ptr);

		page_length = remain;
		if ((gtt_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - gtt_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		slow_kernel_write(dev_priv->mm.gtt_mapping,
				  gtt_page_base, gtt_page_offset,
				  user_pages[data_page_index],
				  data_page_offset,
				  page_length);

		remain -= page_length;
		offset += page_length;
		data_ptr += page_length;
	}

out_unpin_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	offset = args->offset;
	obj->dirty = 1;

	while (remain > 0) {
		struct page *page;
		char *vaddr;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

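		/* Fast path: attempt a non-faulting atomic copy from
		 * userspace under struct_mutex; on a fault or when bit-17
		 * swizzling is needed, take the sleeping path below with
		 * the mutex dropped.
		 */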
		if (!page_do_bit17_swizzling) {
			vaddr = kmap_atomic(page);
			ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
							user_data,
							page_length);
			kunmap_atomic(vaddr);

			if (ret == 0)
				goto next_page;
		}

		hit_slowpath = 1;

		mutex_unlock(&dev->struct_mutex);

		vaddr = kmap(page);
		if (page_do_bit17_swizzling)
			ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
							user_data,
							page_length);
		else
			ret = __copy_from_user(vaddr + shmem_page_offset,
					       user_data,
					       page_length);
		kunmap(page);

		mutex_lock(&dev->struct_mutex);
next_page:
		set_page_dirty(page);
		mark_page_accessed(page);
		page_cache_release(page);

		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	if (hit_slowpath) {
		/* Fixup: Kill any reinstated backing storage pages */
		if (obj->madv == __I915_MADV_PURGED)
			i915_gem_object_truncate(obj);
		/* and flush dirty cachelines in case the object isn't in the cpu write
		 * domain anymore. */
		if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			i915_gem_clflush_object(obj);
			intel_gtt_chipset_flush();
		}
	}

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size))
		return -EFAULT;

	ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
				      args->size);
	if (ret)
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->phys_obj) {
		ret = i915_gem_phys_pwrite(dev, obj, args, file);
		goto out;
	}

	if (obj->gtt_space &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		ret = i915_gem_object_pin(obj, 0, true);
		if (ret)
			goto out;

		ret = i915_gem_object_set_to_gtt_domain(obj, true);
		if (ret)
			goto out_unpin;

		ret = i915_gem_object_put_fence(obj);
		if (ret)
			goto out_unpin;

		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		if (ret == -EFAULT)
			ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);

out_unpin:
		i915_gem_object_unpin(obj);

		if (ret != -EFAULT)
			goto out;
		/* Fall through to the shmfs paths because the gtt paths might
		 * fail with non-page-backed user pointers (e.g. gtt mappings
		 * when moving data between textures). */
	}

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret)
		goto out;

	ret = i915_gem_shmem_pwrite(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain. Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	up_write(&current->mm->mmap_sem);
	drm_gem_object_unreference_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

/**
 * i915_gem_fault - fault a page into the GTT
 * vma: VMA in question
 * vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace. The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room. So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto out;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	/* Now bind it into the GTT if needed */
	if (!obj->map_and_fenceable) {
		ret = i915_gem_object_unbind(obj);
		if (ret)
			goto unlock;
	}
	if (!obj->gtt_space) {
		ret = i915_gem_object_bind_to_gtt(obj, 0, true);
		if (ret)
			goto unlock;

		ret = i915_gem_object_set_to_gtt_domain(obj, write);
		if (ret)
			goto unlock;
	}

	if (!obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(obj, obj->cache_level);

	if (obj->tiling_mode == I915_TILING_NONE)
		ret = i915_gem_object_put_fence(obj);
	else
		ret = i915_gem_object_get_fence(obj, NULL);
	if (ret)
		goto unlock;

	if (i915_gem_object_is_inactive(obj))
		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

	obj->fault_mappable = true;

	pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
		page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EIO:
	case -EAGAIN:
		/* Give the error handler a chance to run and move the
		 * objects off the GPU active list. Next time we service the
		 * fault, we should be able to transition the page into the
		 * GTT without touching the GPU (and so avoid further
		 * EIO/EAGAIN). If the GPU is wedged, then there is no issue
		 * with coherency, just lost writes.
		 */
		set_need_resched();
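		/* fall through */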
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
	if (!obj->fault_mappable)
		return;

	if (obj->base.dev->dev_mapping)
		unmap_mapping_range(obj->base.dev->dev_mapping,
				    (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
				    obj->base.size, 1);

	obj->fault_mappable = false;
}

static uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
	uint32_t gtt_size;

	if (INTEL_INFO(dev)->gen >= 4 ||
	    tiling_mode == I915_TILING_NONE)
		return size;

	/* Previous chips need a power-of-two fence region when tiling */
	if (INTEL_INFO(dev)->gen == 3)
		gtt_size = 1024*1024;
	else
		gtt_size = 512*1024;

	while (gtt_size < size)
		gtt_size <<= 1;

	return gtt_size;
}

/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping.
 */
static uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev,
			   uint32_t size,
			   int tiling_mode)
{
	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (INTEL_INFO(dev)->gen >= 4 ||
	    tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	return i915_gem_get_gtt_size(dev, size, tiling_mode);
}

/**
 * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
 *					 unfenced object
 * @dev: the device
 * @size: size of the object
 * @tiling_mode: tiling mode of the object
 *
 * Return the required GTT alignment for an object, only taking into account
 * unfenced tiled surface requirements.
 */
uint32_t
i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
				    uint32_t size,
				    int tiling_mode)
{
	/*
	 * Minimum alignment is 4k (GTT page size) for sane hw.
	 */
	if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
	    tiling_mode == I915_TILING_NONE)
		return 4096;

	/* Previous hardware however needs to be aligned to a power-of-two
	 * tile height. The simplest method for determining this is to reuse
	 * the power-of-two object size.
	 */
	return i915_gem_get_gtt_size(dev, size, tiling_mode);
}

int
i915_gem_mmap_gtt(struct drm_file *file,
		  struct drm_device *dev,
		  uint32_t handle,
		  uint64_t *offset)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
		ret = -E2BIG;
		goto out;
	}

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to mmap a purgeable buffer\n");
		ret = -EINVAL;
		goto out;
	}

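	/* Allocate the fake mmap offset for this object on first use; the
	 * resulting map_list hash key is what gets handed back to
	 * userspace below.
	 */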
	if (!obj->base.map_list.map) {
		ret = drm_gem_create_mmap_offset(&obj->base);
		if (ret)
			goto out;
	}

	*offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

Dave Airlieff72145b2011-02-07 12:16:14 +10001221/**
1222 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1223 * @dev: DRM device
1224 * @data: GTT mapping ioctl data
1225 * @file: GEM object info
1226 *
1227 * Simply returns the fake offset to userspace so it can mmap it.
1228 * The mmap call will end up in drm_gem_mmap(), which will set things
1229 * up so we can get faults in the handler above.
1230 *
1231 * The fault handler will take care of binding the object into the GTT
1232 * (since it may have been evicted to make room for something), allocating
1233 * a fence register, and mapping the appropriate aperture address into
1234 * userspace.
1235 */
1236int
1237i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1238 struct drm_file *file)
1239{
1240 struct drm_i915_gem_mmap_gtt *args = data;
1241
1242 if (!(dev->driver->driver_features & DRIVER_GEM))
1243 return -ENODEV;
1244
1245 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1246}
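/*
 * Illustrative userspace flow (a sketch, not part of this file; fd, handle,
 * size and ptr are placeholders): the fake offset returned above is consumed
 * roughly as
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.offset);
 *
 * after which faults on ptr are serviced by the GTT fault handler.
 */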
1247
1248
Chris Wilsone5281cc2010-10-28 13:45:36 +01001249static int
Chris Wilson05394f32010-11-08 19:18:58 +00001250i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
Chris Wilsone5281cc2010-10-28 13:45:36 +01001251 gfp_t gfpmask)
1252{
Chris Wilsone5281cc2010-10-28 13:45:36 +01001253 int page_count, i;
1254 struct address_space *mapping;
1255 struct inode *inode;
1256 struct page *page;
1257
1258 /* Get the list of pages out of our struct file. They'll be pinned
1259 * at this point until we release them.
1260 */
Chris Wilson05394f32010-11-08 19:18:58 +00001261 page_count = obj->base.size / PAGE_SIZE;
1262 BUG_ON(obj->pages != NULL);
1263 obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
1264 if (obj->pages == NULL)
Chris Wilsone5281cc2010-10-28 13:45:36 +01001265 return -ENOMEM;
1266
Chris Wilson05394f32010-11-08 19:18:58 +00001267 inode = obj->base.filp->f_path.dentry->d_inode;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001268 mapping = inode->i_mapping;
Hugh Dickins5949eac2011-06-27 16:18:18 -07001269 gfpmask |= mapping_gfp_mask(mapping);
1270
Chris Wilsone5281cc2010-10-28 13:45:36 +01001271 for (i = 0; i < page_count; i++) {
Hugh Dickins5949eac2011-06-27 16:18:18 -07001272 page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
Chris Wilsone5281cc2010-10-28 13:45:36 +01001273 if (IS_ERR(page))
1274 goto err_pages;
1275
Chris Wilson05394f32010-11-08 19:18:58 +00001276 obj->pages[i] = page;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001277 }
1278
Daniel Vetter6dacfd22011-09-12 21:30:02 +02001279 if (i915_gem_object_needs_bit17_swizzle(obj))
Chris Wilsone5281cc2010-10-28 13:45:36 +01001280 i915_gem_object_do_bit_17_swizzle(obj);
1281
1282 return 0;
1283
1284err_pages:
1285 while (i--)
Chris Wilson05394f32010-11-08 19:18:58 +00001286 page_cache_release(obj->pages[i]);
Chris Wilsone5281cc2010-10-28 13:45:36 +01001287
Chris Wilson05394f32010-11-08 19:18:58 +00001288 drm_free_large(obj->pages);
1289 obj->pages = NULL;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001290 return PTR_ERR(page);
1291}
1292
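/*
 * Release the shmem pages pinned by i915_gem_object_get_pages_gtt(): save the
 * bit-17 swizzle state if needed, write back dirty pages (unless the object
 * was marked DONTNEED), and drop the page references.
 */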
Chris Wilson5cdf5882010-09-27 15:51:07 +01001293static void
Chris Wilson05394f32010-11-08 19:18:58 +00001294i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001295{
Chris Wilson05394f32010-11-08 19:18:58 +00001296 int page_count = obj->base.size / PAGE_SIZE;
Eric Anholt673a3942008-07-30 12:06:12 -07001297 int i;
1298
Chris Wilson05394f32010-11-08 19:18:58 +00001299 BUG_ON(obj->madv == __I915_MADV_PURGED);
Eric Anholt856fa192009-03-19 14:10:50 -07001300
Daniel Vetter6dacfd22011-09-12 21:30:02 +02001301 if (i915_gem_object_needs_bit17_swizzle(obj))
Eric Anholt280b7132009-03-12 16:56:27 -07001302 i915_gem_object_save_bit_17_swizzle(obj);
1303
Chris Wilson05394f32010-11-08 19:18:58 +00001304 if (obj->madv == I915_MADV_DONTNEED)
1305 obj->dirty = 0;
Chris Wilson3ef94da2009-09-14 16:50:29 +01001306
1307 for (i = 0; i < page_count; i++) {
Chris Wilson05394f32010-11-08 19:18:58 +00001308 if (obj->dirty)
1309 set_page_dirty(obj->pages[i]);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001310
Chris Wilson05394f32010-11-08 19:18:58 +00001311 if (obj->madv == I915_MADV_WILLNEED)
1312 mark_page_accessed(obj->pages[i]);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001313
Chris Wilson05394f32010-11-08 19:18:58 +00001314 page_cache_release(obj->pages[i]);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001315 }
Chris Wilson05394f32010-11-08 19:18:58 +00001316 obj->dirty = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001317
Chris Wilson05394f32010-11-08 19:18:58 +00001318 drm_free_large(obj->pages);
1319 obj->pages = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07001320}
1321
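/*
 * Track the object on the ring's active list: a reference is held while the
 * object is active, the last rendering seqno is recorded for retirement, and
 * if the object has fenced GPU access its fence is bumped on the fence LRU
 * with the same seqno.
 */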
Chris Wilson54cf91d2010-11-25 18:00:26 +00001322void
Chris Wilson05394f32010-11-08 19:18:58 +00001323i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001324 struct intel_ring_buffer *ring,
1325 u32 seqno)
Eric Anholt673a3942008-07-30 12:06:12 -07001326{
Chris Wilson05394f32010-11-08 19:18:58 +00001327 struct drm_device *dev = obj->base.dev;
Chris Wilson69dc4982010-10-19 10:36:51 +01001328 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter617dbe22010-02-11 22:16:02 +01001329
Zou Nan hai852835f2010-05-21 09:08:56 +08001330 BUG_ON(ring == NULL);
Chris Wilson05394f32010-11-08 19:18:58 +00001331 obj->ring = ring;
Eric Anholt673a3942008-07-30 12:06:12 -07001332
1333 /* Add a reference if we're newly entering the active list. */
Chris Wilson05394f32010-11-08 19:18:58 +00001334 if (!obj->active) {
1335 drm_gem_object_reference(&obj->base);
1336 obj->active = 1;
Eric Anholt673a3942008-07-30 12:06:12 -07001337 }
Daniel Vettere35a41d2010-02-11 22:13:59 +01001338
Eric Anholt673a3942008-07-30 12:06:12 -07001339 /* Move from whatever list we were on to the tail of execution. */
Chris Wilson05394f32010-11-08 19:18:58 +00001340 list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1341 list_move_tail(&obj->ring_list, &ring->active_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00001342
Chris Wilson05394f32010-11-08 19:18:58 +00001343 obj->last_rendering_seqno = seqno;
Chris Wilsoncaea7472010-11-12 13:53:37 +00001344 if (obj->fenced_gpu_access) {
1345 struct drm_i915_fence_reg *reg;
1346
1347 BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE);
1348
1349 obj->last_fenced_seqno = seqno;
1350 obj->last_fenced_ring = ring;
1351
1352 reg = &dev_priv->fence_regs[obj->fence_reg];
1353 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
1354 }
1355}
1356
1357static void
1358i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
1359{
1360 list_del_init(&obj->ring_list);
1361 obj->last_rendering_seqno = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001362}
1363
Eric Anholtce44b0e2008-11-06 16:00:31 -08001364static void
Chris Wilson05394f32010-11-08 19:18:58 +00001365i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
Eric Anholtce44b0e2008-11-06 16:00:31 -08001366{
Chris Wilson05394f32010-11-08 19:18:58 +00001367 struct drm_device *dev = obj->base.dev;
Eric Anholtce44b0e2008-11-06 16:00:31 -08001368 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholtce44b0e2008-11-06 16:00:31 -08001369
Chris Wilson05394f32010-11-08 19:18:58 +00001370 BUG_ON(!obj->active);
1371 list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00001372
1373 i915_gem_object_move_off_active(obj);
1374}
1375
1376static void
1377i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1378{
1379 struct drm_device *dev = obj->base.dev;
1380 struct drm_i915_private *dev_priv = dev->dev_private;
1381
1382 if (obj->pin_count != 0)
1383 list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
1384 else
1385 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1386
1387 BUG_ON(!list_empty(&obj->gpu_write_list));
1388 BUG_ON(!obj->active);
1389 obj->ring = NULL;
1390
1391 i915_gem_object_move_off_active(obj);
1392 obj->fenced_gpu_access = false;
Chris Wilsoncaea7472010-11-12 13:53:37 +00001393
1394 obj->active = 0;
Chris Wilson87ca9c82010-12-02 09:42:56 +00001395 obj->pending_gpu_write = false;
Chris Wilsoncaea7472010-11-12 13:53:37 +00001396 drm_gem_object_unreference(&obj->base);
1397
1398 WARN_ON(i915_verify_lists(dev));
Eric Anholtce44b0e2008-11-06 16:00:31 -08001399}
Eric Anholt673a3942008-07-30 12:06:12 -07001400
Chris Wilson963b4832009-09-20 23:03:54 +01001401/* Immediately discard the backing storage */
1402static void
Chris Wilson05394f32010-11-08 19:18:58 +00001403i915_gem_object_truncate(struct drm_i915_gem_object *obj)
Chris Wilson963b4832009-09-20 23:03:54 +01001404{
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001405 struct inode *inode;
Chris Wilson963b4832009-09-20 23:03:54 +01001406
Chris Wilsonae9fed62010-08-07 11:01:30 +01001407 /* Our goal here is to return as much of the memory as
 1408	 * possible back to the system as we are called from OOM.
1409 * To do this we must instruct the shmfs to drop all of its
Hugh Dickinse2377fe2011-06-27 16:18:19 -07001410 * backing pages, *now*.
Chris Wilsonae9fed62010-08-07 11:01:30 +01001411 */
Chris Wilson05394f32010-11-08 19:18:58 +00001412 inode = obj->base.filp->f_path.dentry->d_inode;
Hugh Dickinse2377fe2011-06-27 16:18:19 -07001413 shmem_truncate_range(inode, 0, (loff_t)-1);
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001414
Chris Wilsona14917e2012-02-24 21:13:38 +00001415 if (obj->base.map_list.map)
1416 drm_gem_free_mmap_offset(&obj->base);
1417
Chris Wilson05394f32010-11-08 19:18:58 +00001418 obj->madv = __I915_MADV_PURGED;
Chris Wilson963b4832009-09-20 23:03:54 +01001419}
1420
1421static inline int
Chris Wilson05394f32010-11-08 19:18:58 +00001422i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
Chris Wilson963b4832009-09-20 23:03:54 +01001423{
Chris Wilson05394f32010-11-08 19:18:58 +00001424 return obj->madv == I915_MADV_DONTNEED;
Chris Wilson963b4832009-09-20 23:03:54 +01001425}
1426
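/*
 * After a flush, any object on the ring's gpu_write_list whose write domain
 * was covered by @flush_domains has its pending write cleared and is moved to
 * the active list, tagged with the ring's next request seqno.
 */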
Eric Anholt673a3942008-07-30 12:06:12 -07001427static void
Chris Wilsondb53a302011-02-03 11:57:46 +00001428i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
1429 uint32_t flush_domains)
Daniel Vetter63560392010-02-19 11:51:59 +01001430{
Chris Wilson05394f32010-11-08 19:18:58 +00001431 struct drm_i915_gem_object *obj, *next;
Daniel Vetter63560392010-02-19 11:51:59 +01001432
Chris Wilson05394f32010-11-08 19:18:58 +00001433 list_for_each_entry_safe(obj, next,
Chris Wilson64193402010-10-24 12:38:05 +01001434 &ring->gpu_write_list,
Daniel Vetter63560392010-02-19 11:51:59 +01001435 gpu_write_list) {
Chris Wilson05394f32010-11-08 19:18:58 +00001436 if (obj->base.write_domain & flush_domains) {
1437 uint32_t old_write_domain = obj->base.write_domain;
Daniel Vetter63560392010-02-19 11:51:59 +01001438
Chris Wilson05394f32010-11-08 19:18:58 +00001439 obj->base.write_domain = 0;
1440 list_del_init(&obj->gpu_write_list);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001441 i915_gem_object_move_to_active(obj, ring,
Chris Wilsondb53a302011-02-03 11:57:46 +00001442 i915_gem_next_request_seqno(ring));
Daniel Vetter63560392010-02-19 11:51:59 +01001443
Daniel Vetter63560392010-02-19 11:51:59 +01001444 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00001445 obj->base.read_domains,
Daniel Vetter63560392010-02-19 11:51:59 +01001446 old_write_domain);
1447 }
1448 }
1449}
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001450
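/*
 * Seqno allocation: dev_priv->next_seqno is a simple wrapping counter with 0
 * reserved as "no seqno", and a ring's outstanding_lazy_request caches the
 * seqno that the next request emitted on that ring will use.
 */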
Daniel Vetter53d227f2012-01-25 16:32:49 +01001451static u32
1452i915_gem_get_seqno(struct drm_device *dev)
1453{
1454 drm_i915_private_t *dev_priv = dev->dev_private;
1455 u32 seqno = dev_priv->next_seqno;
1456
1457 /* reserve 0 for non-seqno */
1458 if (++dev_priv->next_seqno == 0)
1459 dev_priv->next_seqno = 1;
1460
1461 return seqno;
1462}
1463
1464u32
1465i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
1466{
1467 if (ring->outstanding_lazy_request == 0)
1468 ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
1469
1470 return ring->outstanding_lazy_request;
1471}
1472
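/*
 * Emit a request onto @ring: have the ring write the new seqno, remember where
 * the request starts in the ring, queue it on the ring's (and, if present, the
 * file's) request list, and kick the hangcheck timer and retire work.
 */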
Chris Wilson3cce4692010-10-27 16:11:02 +01001473int
Chris Wilsondb53a302011-02-03 11:57:46 +00001474i915_add_request(struct intel_ring_buffer *ring,
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001475 struct drm_file *file,
Chris Wilsondb53a302011-02-03 11:57:46 +00001476 struct drm_i915_gem_request *request)
Eric Anholt673a3942008-07-30 12:06:12 -07001477{
Chris Wilsondb53a302011-02-03 11:57:46 +00001478 drm_i915_private_t *dev_priv = ring->dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07001479 uint32_t seqno;
Chris Wilsona71d8d92012-02-15 11:25:36 +00001480 u32 request_ring_position;
Eric Anholt673a3942008-07-30 12:06:12 -07001481 int was_empty;
Chris Wilson3cce4692010-10-27 16:11:02 +01001482 int ret;
1483
1484 BUG_ON(request == NULL);
Daniel Vetter53d227f2012-01-25 16:32:49 +01001485 seqno = i915_gem_next_request_seqno(ring);
Eric Anholt673a3942008-07-30 12:06:12 -07001486
Chris Wilsona71d8d92012-02-15 11:25:36 +00001487	/* Record the position of the start of the request so that,
 1488	 * should we detect the updated seqno part-way through
 1489	 * GPU processing of the request, we never over-estimate the
1490 * position of the head.
1491 */
1492 request_ring_position = intel_ring_get_tail(ring);
1493
Chris Wilson3cce4692010-10-27 16:11:02 +01001494 ret = ring->add_request(ring, &seqno);
1495 if (ret)
1496 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001497
Chris Wilsondb53a302011-02-03 11:57:46 +00001498 trace_i915_gem_request_add(ring, seqno);
Eric Anholt673a3942008-07-30 12:06:12 -07001499
1500 request->seqno = seqno;
Zou Nan hai852835f2010-05-21 09:08:56 +08001501 request->ring = ring;
Chris Wilsona71d8d92012-02-15 11:25:36 +00001502 request->tail = request_ring_position;
Eric Anholt673a3942008-07-30 12:06:12 -07001503 request->emitted_jiffies = jiffies;
Zou Nan hai852835f2010-05-21 09:08:56 +08001504 was_empty = list_empty(&ring->request_list);
1505 list_add_tail(&request->list, &ring->request_list);
1506
Chris Wilsondb53a302011-02-03 11:57:46 +00001507 if (file) {
1508 struct drm_i915_file_private *file_priv = file->driver_priv;
1509
Chris Wilson1c255952010-09-26 11:03:27 +01001510 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001511 request->file_priv = file_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00001512 list_add_tail(&request->client_list,
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001513 &file_priv->mm.request_list);
Chris Wilson1c255952010-09-26 11:03:27 +01001514 spin_unlock(&file_priv->mm.lock);
Eric Anholtb9624422009-06-03 07:27:35 +00001515 }
Eric Anholt673a3942008-07-30 12:06:12 -07001516
Daniel Vetter5391d0c2012-01-25 14:03:57 +01001517 ring->outstanding_lazy_request = 0;
Chris Wilsondb53a302011-02-03 11:57:46 +00001518
Ben Gamarif65d9422009-09-14 17:48:44 -04001519 if (!dev_priv->mm.suspended) {
Ben Widawsky3e0dc6b2011-06-29 10:26:42 -07001520 if (i915_enable_hangcheck) {
1521 mod_timer(&dev_priv->hangcheck_timer,
1522 jiffies +
1523 msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
1524 }
Ben Gamarif65d9422009-09-14 17:48:44 -04001525 if (was_empty)
Chris Wilsonb3b079d2010-09-13 23:44:34 +01001526 queue_delayed_work(dev_priv->wq,
1527 &dev_priv->mm.retire_work, HZ);
Ben Gamarif65d9422009-09-14 17:48:44 -04001528 }
Chris Wilson3cce4692010-10-27 16:11:02 +01001529 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001530}
1531
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001532static inline void
1533i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
Eric Anholt673a3942008-07-30 12:06:12 -07001534{
Chris Wilson1c255952010-09-26 11:03:27 +01001535 struct drm_i915_file_private *file_priv = request->file_priv;
Eric Anholt673a3942008-07-30 12:06:12 -07001536
Chris Wilson1c255952010-09-26 11:03:27 +01001537 if (!file_priv)
1538 return;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001539
Chris Wilson1c255952010-09-26 11:03:27 +01001540 spin_lock(&file_priv->mm.lock);
Herton Ronaldo Krzesinski09bfa512011-03-17 13:45:12 +00001541 if (request->file_priv) {
1542 list_del(&request->client_list);
1543 request->file_priv = NULL;
1544 }
Chris Wilson1c255952010-09-26 11:03:27 +01001545 spin_unlock(&file_priv->mm.lock);
Eric Anholt673a3942008-07-30 12:06:12 -07001546}
1547
Chris Wilsondfaae392010-09-22 10:31:52 +01001548static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
1549 struct intel_ring_buffer *ring)
Chris Wilson9375e442010-09-19 12:21:28 +01001550{
Chris Wilsondfaae392010-09-22 10:31:52 +01001551 while (!list_empty(&ring->request_list)) {
1552 struct drm_i915_gem_request *request;
Chris Wilson9375e442010-09-19 12:21:28 +01001553
Chris Wilsondfaae392010-09-22 10:31:52 +01001554 request = list_first_entry(&ring->request_list,
1555 struct drm_i915_gem_request,
1556 list);
1557
1558 list_del(&request->list);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001559 i915_gem_request_remove_from_client(request);
Chris Wilsondfaae392010-09-22 10:31:52 +01001560 kfree(request);
1561 }
1562
1563 while (!list_empty(&ring->active_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00001564 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001565
Chris Wilson05394f32010-11-08 19:18:58 +00001566 obj = list_first_entry(&ring->active_list,
1567 struct drm_i915_gem_object,
1568 ring_list);
Eric Anholt673a3942008-07-30 12:06:12 -07001569
Chris Wilson05394f32010-11-08 19:18:58 +00001570 obj->base.write_domain = 0;
1571 list_del_init(&obj->gpu_write_list);
1572 i915_gem_object_move_to_inactive(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001573 }
Eric Anholt673a3942008-07-30 12:06:12 -07001574}
1575
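/*
 * On GPU reset every fence register is dropped: mmaps of tiled objects are
 * torn down so the next access refaults, and the per-object fence state is
 * cleared before the register itself is released.
 */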
Chris Wilson312817a2010-11-22 11:50:11 +00001576static void i915_gem_reset_fences(struct drm_device *dev)
1577{
1578 struct drm_i915_private *dev_priv = dev->dev_private;
1579 int i;
1580
Daniel Vetter4b9de732011-10-09 21:52:02 +02001581 for (i = 0; i < dev_priv->num_fence_regs; i++) {
Chris Wilson312817a2010-11-22 11:50:11 +00001582 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
Chris Wilson7d2cb392010-11-27 17:38:29 +00001583 struct drm_i915_gem_object *obj = reg->obj;
1584
1585 if (!obj)
1586 continue;
1587
1588 if (obj->tiling_mode)
1589 i915_gem_release_mmap(obj);
1590
Chris Wilsond9e86c02010-11-10 16:40:20 +00001591 reg->obj->fence_reg = I915_FENCE_REG_NONE;
1592 reg->obj->fenced_gpu_access = false;
1593 reg->obj->last_fenced_seqno = 0;
1594 reg->obj->last_fenced_ring = NULL;
1595 i915_gem_clear_fence_reg(dev, reg);
Chris Wilson312817a2010-11-22 11:50:11 +00001596 }
1597}
1598
Chris Wilson069efc12010-09-30 16:53:18 +01001599void i915_gem_reset(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07001600{
Chris Wilsondfaae392010-09-22 10:31:52 +01001601 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00001602 struct drm_i915_gem_object *obj;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001603 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07001604
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001605 for (i = 0; i < I915_NUM_RINGS; i++)
1606 i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);
Chris Wilsondfaae392010-09-22 10:31:52 +01001607
1608 /* Remove anything from the flushing lists. The GPU cache is likely
1609 * to be lost on reset along with the data, so simply move the
1610 * lost bo to the inactive list.
1611 */
1612 while (!list_empty(&dev_priv->mm.flushing_list)) {
Akshay Joshi0206e352011-08-16 15:34:10 -04001613 obj = list_first_entry(&dev_priv->mm.flushing_list,
Chris Wilson05394f32010-11-08 19:18:58 +00001614 struct drm_i915_gem_object,
1615 mm_list);
Chris Wilson9375e442010-09-19 12:21:28 +01001616
Chris Wilson05394f32010-11-08 19:18:58 +00001617 obj->base.write_domain = 0;
1618 list_del_init(&obj->gpu_write_list);
1619 i915_gem_object_move_to_inactive(obj);
Chris Wilson9375e442010-09-19 12:21:28 +01001620 }
Chris Wilson9375e442010-09-19 12:21:28 +01001621
Chris Wilsondfaae392010-09-22 10:31:52 +01001622 /* Move everything out of the GPU domains to ensure we do any
1623 * necessary invalidation upon reuse.
1624 */
Chris Wilson05394f32010-11-08 19:18:58 +00001625 list_for_each_entry(obj,
Chris Wilson77f01232010-09-19 12:31:36 +01001626 &dev_priv->mm.inactive_list,
Chris Wilson69dc4982010-10-19 10:36:51 +01001627 mm_list)
Chris Wilson77f01232010-09-19 12:31:36 +01001628 {
Chris Wilson05394f32010-11-08 19:18:58 +00001629 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
Chris Wilson77f01232010-09-19 12:31:36 +01001630 }
Chris Wilson069efc12010-09-30 16:53:18 +01001631
1632 /* The fence registers are invalidated so clear them out */
Chris Wilson312817a2010-11-22 11:50:11 +00001633 i915_gem_reset_fences(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07001634}
1635
1636/**
1637 * This function clears the request list as sequence numbers are passed.
1638 */
Chris Wilsona71d8d92012-02-15 11:25:36 +00001639void
Chris Wilsondb53a302011-02-03 11:57:46 +00001640i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001641{
Eric Anholt673a3942008-07-30 12:06:12 -07001642 uint32_t seqno;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001643 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07001644
Chris Wilsondb53a302011-02-03 11:57:46 +00001645 if (list_empty(&ring->request_list))
Karsten Wiese6c0594a2009-02-23 15:07:57 +01001646 return;
1647
Chris Wilsondb53a302011-02-03 11:57:46 +00001648 WARN_ON(i915_verify_lists(ring->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07001649
Chris Wilson78501ea2010-10-27 12:18:21 +01001650 seqno = ring->get_seqno(ring);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001651
Chris Wilson076e2c02011-01-21 10:07:18 +00001652 for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001653 if (seqno >= ring->sync_seqno[i])
1654 ring->sync_seqno[i] = 0;
1655
Zou Nan hai852835f2010-05-21 09:08:56 +08001656 while (!list_empty(&ring->request_list)) {
Eric Anholt673a3942008-07-30 12:06:12 -07001657 struct drm_i915_gem_request *request;
Eric Anholt673a3942008-07-30 12:06:12 -07001658
Zou Nan hai852835f2010-05-21 09:08:56 +08001659 request = list_first_entry(&ring->request_list,
Eric Anholt673a3942008-07-30 12:06:12 -07001660 struct drm_i915_gem_request,
1661 list);
Eric Anholt673a3942008-07-30 12:06:12 -07001662
Chris Wilsondfaae392010-09-22 10:31:52 +01001663 if (!i915_seqno_passed(seqno, request->seqno))
Eric Anholt673a3942008-07-30 12:06:12 -07001664 break;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001665
Chris Wilsondb53a302011-02-03 11:57:46 +00001666 trace_i915_gem_request_retire(ring, request->seqno);
Chris Wilsona71d8d92012-02-15 11:25:36 +00001667 /* We know the GPU must have read the request to have
 1668	 * sent us the seqno + interrupt, so use the tail of the
 1669	 * request to update the last known position
1670 * of the GPU head.
1671 */
1672 ring->last_retired_head = request->tail;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001673
1674 list_del(&request->list);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001675 i915_gem_request_remove_from_client(request);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001676 kfree(request);
1677 }
1678
1679 /* Move any buffers on the active list that are no longer referenced
1680 * by the ringbuffer to the flushing/inactive lists as appropriate.
1681 */
1682 while (!list_empty(&ring->active_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00001683 struct drm_i915_gem_object *obj;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001684
Akshay Joshi0206e352011-08-16 15:34:10 -04001685 obj = list_first_entry(&ring->active_list,
Chris Wilson05394f32010-11-08 19:18:58 +00001686 struct drm_i915_gem_object,
1687 ring_list);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001688
Chris Wilson05394f32010-11-08 19:18:58 +00001689 if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001690 break;
1691
Chris Wilson05394f32010-11-08 19:18:58 +00001692 if (obj->base.write_domain != 0)
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001693 i915_gem_object_move_to_flushing(obj);
1694 else
1695 i915_gem_object_move_to_inactive(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001696 }
Chris Wilson9d34e5d2009-09-24 05:26:06 +01001697
Chris Wilsondb53a302011-02-03 11:57:46 +00001698 if (unlikely(ring->trace_irq_seqno &&
1699 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001700 ring->irq_put(ring);
Chris Wilsondb53a302011-02-03 11:57:46 +00001701 ring->trace_irq_seqno = 0;
Chris Wilson9d34e5d2009-09-24 05:26:06 +01001702 }
Chris Wilson23bc5982010-09-29 16:10:57 +01001703
Chris Wilsondb53a302011-02-03 11:57:46 +00001704 WARN_ON(i915_verify_lists(ring->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07001705}
1706
1707void
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001708i915_gem_retire_requests(struct drm_device *dev)
1709{
1710 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001711 int i;
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001712
Chris Wilsonbe726152010-07-23 23:18:50 +01001713 if (!list_empty(&dev_priv->mm.deferred_free_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00001714 struct drm_i915_gem_object *obj, *next;
Chris Wilsonbe726152010-07-23 23:18:50 +01001715
1716 /* We must be careful that during unbind() we do not
1717 * accidentally infinitely recurse into retire requests.
1718 * Currently:
1719 * retire -> free -> unbind -> wait -> retire_ring
1720 */
Chris Wilson05394f32010-11-08 19:18:58 +00001721 list_for_each_entry_safe(obj, next,
Chris Wilsonbe726152010-07-23 23:18:50 +01001722 &dev_priv->mm.deferred_free_list,
Chris Wilson69dc4982010-10-19 10:36:51 +01001723 mm_list)
Chris Wilson05394f32010-11-08 19:18:58 +00001724 i915_gem_free_object_tail(obj);
Chris Wilsonbe726152010-07-23 23:18:50 +01001725 }
1726
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001727 for (i = 0; i < I915_NUM_RINGS; i++)
Chris Wilsondb53a302011-02-03 11:57:46 +00001728 i915_gem_retire_requests_ring(&dev_priv->ring[i]);
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001729}
1730
Daniel Vetter75ef9da2010-08-21 00:25:16 +02001731static void
Eric Anholt673a3942008-07-30 12:06:12 -07001732i915_gem_retire_work_handler(struct work_struct *work)
1733{
1734 drm_i915_private_t *dev_priv;
1735 struct drm_device *dev;
Chris Wilson0a587052011-01-09 21:05:44 +00001736 bool idle;
1737 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07001738
1739 dev_priv = container_of(work, drm_i915_private_t,
1740 mm.retire_work.work);
1741 dev = dev_priv->dev;
1742
Chris Wilson891b48c2010-09-29 12:26:37 +01001743 /* Come back later if the device is busy... */
1744 if (!mutex_trylock(&dev->struct_mutex)) {
1745 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1746 return;
1747 }
1748
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001749 i915_gem_retire_requests(dev);
Zou Nan haid1b851f2010-05-21 09:08:57 +08001750
Chris Wilson0a587052011-01-09 21:05:44 +00001751 /* Send a periodic flush down the ring so we don't hold onto GEM
1752 * objects indefinitely.
1753 */
1754 idle = true;
1755 for (i = 0; i < I915_NUM_RINGS; i++) {
1756 struct intel_ring_buffer *ring = &dev_priv->ring[i];
1757
1758 if (!list_empty(&ring->gpu_write_list)) {
1759 struct drm_i915_gem_request *request;
1760 int ret;
1761
Chris Wilsondb53a302011-02-03 11:57:46 +00001762 ret = i915_gem_flush_ring(ring,
1763 0, I915_GEM_GPU_DOMAINS);
Chris Wilson0a587052011-01-09 21:05:44 +00001764 request = kzalloc(sizeof(*request), GFP_KERNEL);
1765 if (ret || request == NULL ||
Chris Wilsondb53a302011-02-03 11:57:46 +00001766 i915_add_request(ring, NULL, request))
Chris Wilson0a587052011-01-09 21:05:44 +00001767 kfree(request);
1768 }
1769
1770 idle &= list_empty(&ring->request_list);
1771 }
1772
1773 if (!dev_priv->mm.suspended && !idle)
Eric Anholt9c9fe1f2009-08-03 16:09:16 -07001774 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
Chris Wilson0a587052011-01-09 21:05:44 +00001775
Eric Anholt673a3942008-07-30 12:06:12 -07001776 mutex_unlock(&dev->struct_mutex);
1777}
1778
Chris Wilsondb53a302011-02-03 11:57:46 +00001779/**
1780 * Waits for a sequence number to be signaled, and cleans up the
1781 * request and object lists appropriately for that event.
1782 */
Daniel Vetter5a5a0c62009-09-15 22:57:36 +02001783int
Chris Wilsondb53a302011-02-03 11:57:46 +00001784i915_wait_request(struct intel_ring_buffer *ring,
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08001785 uint32_t seqno,
1786 bool do_retire)
Eric Anholt673a3942008-07-30 12:06:12 -07001787{
Chris Wilsondb53a302011-02-03 11:57:46 +00001788 drm_i915_private_t *dev_priv = ring->dev->dev_private;
Jesse Barnes802c7eb2009-05-05 16:03:48 -07001789 u32 ier;
Eric Anholt673a3942008-07-30 12:06:12 -07001790 int ret = 0;
1791
1792 BUG_ON(seqno == 0);
1793
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001794 if (atomic_read(&dev_priv->mm.wedged)) {
1795 struct completion *x = &dev_priv->error_completion;
1796 bool recovery_complete;
1797 unsigned long flags;
1798
1799 /* Give the error handler a chance to run. */
1800 spin_lock_irqsave(&x->wait.lock, flags);
1801 recovery_complete = x->done > 0;
1802 spin_unlock_irqrestore(&x->wait.lock, flags);
1803
1804 return recovery_complete ? -EIO : -EAGAIN;
1805 }
Ben Gamariffed1d02009-09-14 17:48:41 -04001806
Chris Wilson5d97eb62010-11-10 20:40:02 +00001807 if (seqno == ring->outstanding_lazy_request) {
Chris Wilson3cce4692010-10-27 16:11:02 +01001808 struct drm_i915_gem_request *request;
1809
1810 request = kzalloc(sizeof(*request), GFP_KERNEL);
1811 if (request == NULL)
Daniel Vettere35a41d2010-02-11 22:13:59 +01001812 return -ENOMEM;
Chris Wilson3cce4692010-10-27 16:11:02 +01001813
Chris Wilsondb53a302011-02-03 11:57:46 +00001814 ret = i915_add_request(ring, NULL, request);
Chris Wilson3cce4692010-10-27 16:11:02 +01001815 if (ret) {
1816 kfree(request);
1817 return ret;
1818 }
1819
1820 seqno = request->seqno;
Daniel Vettere35a41d2010-02-11 22:13:59 +01001821 }
1822
Chris Wilson78501ea2010-10-27 12:18:21 +01001823 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
Chris Wilsondb53a302011-02-03 11:57:46 +00001824 if (HAS_PCH_SPLIT(ring->dev))
Zhenyu Wang036a4a72009-06-08 14:40:19 +08001825 ier = I915_READ(DEIER) | I915_READ(GTIER);
1826 else
1827 ier = I915_READ(IER);
Jesse Barnes802c7eb2009-05-05 16:03:48 -07001828 if (!ier) {
1829 DRM_ERROR("something (likely vbetool) disabled "
1830 "interrupts, re-enabling\n");
Chris Wilsonf01c22f2011-06-28 11:48:51 +01001831 ring->dev->driver->irq_preinstall(ring->dev);
1832 ring->dev->driver->irq_postinstall(ring->dev);
Jesse Barnes802c7eb2009-05-05 16:03:48 -07001833 }
1834
Chris Wilsondb53a302011-02-03 11:57:46 +00001835 trace_i915_gem_request_wait_begin(ring, seqno);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001836
Chris Wilsonb2223492010-10-27 15:27:33 +01001837 ring->waiting_seqno = seqno;
Chris Wilsonb13c2b92010-12-13 16:54:50 +00001838 if (ring->irq_get(ring)) {
Chris Wilsonce453d82011-02-21 14:43:56 +00001839 if (dev_priv->mm.interruptible)
Chris Wilsonb13c2b92010-12-13 16:54:50 +00001840 ret = wait_event_interruptible(ring->irq_queue,
1841 i915_seqno_passed(ring->get_seqno(ring), seqno)
1842 || atomic_read(&dev_priv->mm.wedged));
1843 else
1844 wait_event(ring->irq_queue,
1845 i915_seqno_passed(ring->get_seqno(ring), seqno)
1846 || atomic_read(&dev_priv->mm.wedged));
Daniel Vetter48764bf2009-09-15 22:57:32 +02001847
Chris Wilsonb13c2b92010-12-13 16:54:50 +00001848 ring->irq_put(ring);
Eric Anholte959b5d2011-12-22 14:55:01 -08001849 } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
1850 seqno) ||
1851 atomic_read(&dev_priv->mm.wedged), 3000))
Chris Wilsonb5ba1772010-12-14 12:17:15 +00001852 ret = -EBUSY;
Chris Wilsonb2223492010-10-27 15:27:33 +01001853 ring->waiting_seqno = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001854
Chris Wilsondb53a302011-02-03 11:57:46 +00001855 trace_i915_gem_request_wait_end(ring, seqno);
Eric Anholt673a3942008-07-30 12:06:12 -07001856 }
Ben Gamariba1234d2009-09-14 17:48:47 -04001857 if (atomic_read(&dev_priv->mm.wedged))
Chris Wilson30dbf0c2010-09-25 10:19:17 +01001858 ret = -EAGAIN;
Eric Anholt673a3942008-07-30 12:06:12 -07001859
Eric Anholt673a3942008-07-30 12:06:12 -07001860 /* Directly dispatch request retiring. While we have the work queue
1861 * to handle this, the waiter on a request often wants an associated
1862 * buffer to have made it to the inactive list, and we would need
1863 * a separate wait queue to handle that.
1864 */
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08001865 if (ret == 0 && do_retire)
Chris Wilsondb53a302011-02-03 11:57:46 +00001866 i915_gem_retire_requests_ring(ring);
Eric Anholt673a3942008-07-30 12:06:12 -07001867
1868 return ret;
1869}
1870
Daniel Vetter48764bf2009-09-15 22:57:32 +02001871/**
Eric Anholt673a3942008-07-30 12:06:12 -07001872 * Ensures that all rendering to the object has completed and the object is
1873 * safe to unbind from the GTT or access from the CPU.
1874 */
Chris Wilson54cf91d2010-11-25 18:00:26 +00001875int
Chris Wilsonce453d82011-02-21 14:43:56 +00001876i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001877{
Eric Anholt673a3942008-07-30 12:06:12 -07001878 int ret;
1879
Eric Anholte47c68e2008-11-14 13:35:19 -08001880 /* This function only exists to support waiting for existing rendering,
1881 * not for emitting required flushes.
Eric Anholt673a3942008-07-30 12:06:12 -07001882 */
Chris Wilson05394f32010-11-08 19:18:58 +00001883 BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
Eric Anholt673a3942008-07-30 12:06:12 -07001884
1885 /* If there is rendering queued on the buffer being evicted, wait for
1886 * it.
1887 */
Chris Wilson05394f32010-11-08 19:18:58 +00001888 if (obj->active) {
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08001889 ret = i915_wait_request(obj->ring, obj->last_rendering_seqno,
1890 true);
Chris Wilson2cf34d72010-09-14 13:03:28 +01001891 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07001892 return ret;
1893 }
1894
1895 return 0;
1896}
1897
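/*
 * Drop the object out of the GTT domain before unbinding: a memory barrier
 * orders prior GTT accesses, the CPU mapping is zapped so later accesses
 * refault, and the domain-tracking bits are updated.
 */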
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01001898static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
1899{
1900 u32 old_write_domain, old_read_domains;
1901
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01001902	/* Act as a barrier for all accesses through the GTT */
1903 mb();
1904
1905 /* Force a pagefault for domain tracking on next user access */
1906 i915_gem_release_mmap(obj);
1907
Keith Packardb97c3d92011-06-24 21:02:59 -07001908 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
1909 return;
1910
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01001911 old_read_domains = obj->base.read_domains;
1912 old_write_domain = obj->base.write_domain;
1913
1914 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
1915 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
1916
1917 trace_i915_gem_object_change_domain(obj,
1918 old_read_domains,
1919 old_write_domain);
1920}
1921
Eric Anholt673a3942008-07-30 12:06:12 -07001922/**
1923 * Unbinds an object from the GTT aperture.
1924 */
Jesse Barnes0f973f22009-01-26 17:10:45 -08001925int
Chris Wilson05394f32010-11-08 19:18:58 +00001926i915_gem_object_unbind(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001927{
Daniel Vetter7bddb012012-02-09 17:15:47 +01001928 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07001929 int ret = 0;
1930
Chris Wilson05394f32010-11-08 19:18:58 +00001931 if (obj->gtt_space == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07001932 return 0;
1933
Chris Wilson05394f32010-11-08 19:18:58 +00001934 if (obj->pin_count != 0) {
Eric Anholt673a3942008-07-30 12:06:12 -07001935 DRM_ERROR("Attempting to unbind pinned buffer\n");
1936 return -EINVAL;
1937 }
1938
Chris Wilsona8198ee2011-04-13 22:04:09 +01001939 ret = i915_gem_object_finish_gpu(obj);
Chris Wilson8dc17752010-07-23 23:18:51 +01001940 if (ret == -ERESTARTSYS)
Eric Anholt673a3942008-07-30 12:06:12 -07001941 return ret;
Chris Wilson8dc17752010-07-23 23:18:51 +01001942	/* Continue on if we fail due to EIO: the GPU is hung, so we
 1943	 * should be safe, and we need to clean up or else we might
1944 * cause memory corruption through use-after-free.
1945 */
Chris Wilsona8198ee2011-04-13 22:04:09 +01001946
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01001947 i915_gem_object_finish_gtt(obj);
Chris Wilsona8198ee2011-04-13 22:04:09 +01001948
1949 /* Move the object to the CPU domain to ensure that
1950 * any possible CPU writes while it's not in the GTT
1951 * are flushed when we go to remap it.
1952 */
1953 if (ret == 0)
1954 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
1955 if (ret == -ERESTARTSYS)
1956 return ret;
Chris Wilson812ed4922010-09-30 15:08:57 +01001957 if (ret) {
Chris Wilsona8198ee2011-04-13 22:04:09 +01001958 /* In the event of a disaster, abandon all caches and
1959 * hope for the best.
1960 */
Chris Wilson812ed4922010-09-30 15:08:57 +01001961 i915_gem_clflush_object(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00001962 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
Chris Wilson812ed4922010-09-30 15:08:57 +01001963 }
Eric Anholt673a3942008-07-30 12:06:12 -07001964
Daniel Vetter96b47b62009-12-15 17:50:00 +01001965 /* release the fence reg _after_ flushing */
Chris Wilsond9e86c02010-11-10 16:40:20 +00001966 ret = i915_gem_object_put_fence(obj);
1967 if (ret == -ERESTARTSYS)
1968 return ret;
Daniel Vetter96b47b62009-12-15 17:50:00 +01001969
Chris Wilsondb53a302011-02-03 11:57:46 +00001970 trace_i915_gem_object_unbind(obj);
1971
Daniel Vetter74898d72012-02-15 23:50:22 +01001972 if (obj->has_global_gtt_mapping)
1973 i915_gem_gtt_unbind_object(obj);
Daniel Vetter7bddb012012-02-09 17:15:47 +01001974 if (obj->has_aliasing_ppgtt_mapping) {
1975 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
1976 obj->has_aliasing_ppgtt_mapping = 0;
1977 }
Daniel Vetter74163902012-02-15 23:50:21 +01001978 i915_gem_gtt_finish_object(obj);
Daniel Vetter7bddb012012-02-09 17:15:47 +01001979
Chris Wilsone5281cc2010-10-28 13:45:36 +01001980 i915_gem_object_put_pages_gtt(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001981
Chris Wilson6299f992010-11-24 12:23:44 +00001982 list_del_init(&obj->gtt_list);
Chris Wilson05394f32010-11-08 19:18:58 +00001983 list_del_init(&obj->mm_list);
Daniel Vetter75e9e912010-11-04 17:11:09 +01001984 /* Avoid an unnecessary call to unbind on rebind. */
Chris Wilson05394f32010-11-08 19:18:58 +00001985 obj->map_and_fenceable = true;
Eric Anholt673a3942008-07-30 12:06:12 -07001986
Chris Wilson05394f32010-11-08 19:18:58 +00001987 drm_mm_put_block(obj->gtt_space);
1988 obj->gtt_space = NULL;
1989 obj->gtt_offset = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001990
Chris Wilson05394f32010-11-08 19:18:58 +00001991 if (i915_gem_object_is_purgeable(obj))
Chris Wilson963b4832009-09-20 23:03:54 +01001992 i915_gem_object_truncate(obj);
1993
Chris Wilson8dc17752010-07-23 23:18:51 +01001994 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001995}
1996
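/*
 * Invalidate/flush the ring's caches for the given domain masks; if any GPU
 * domain was actually flushed, the affected objects on the ring's
 * gpu_write_list are moved to the active list via
 * i915_gem_process_flushing_list().
 */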
Chris Wilson88241782011-01-07 17:09:48 +00001997int
Chris Wilsondb53a302011-02-03 11:57:46 +00001998i915_gem_flush_ring(struct intel_ring_buffer *ring,
Chris Wilson54cf91d2010-11-25 18:00:26 +00001999 uint32_t invalidate_domains,
2000 uint32_t flush_domains)
2001{
Chris Wilson88241782011-01-07 17:09:48 +00002002 int ret;
2003
Chris Wilson36d527d2011-03-19 22:26:49 +00002004 if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
2005 return 0;
2006
Chris Wilsondb53a302011-02-03 11:57:46 +00002007 trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
2008
Chris Wilson88241782011-01-07 17:09:48 +00002009 ret = ring->flush(ring, invalidate_domains, flush_domains);
2010 if (ret)
2011 return ret;
2012
Chris Wilson36d527d2011-03-19 22:26:49 +00002013 if (flush_domains & I915_GEM_GPU_DOMAINS)
2014 i915_gem_process_flushing_list(ring, flush_domains);
2015
Chris Wilson88241782011-01-07 17:09:48 +00002016 return 0;
Chris Wilson54cf91d2010-11-25 18:00:26 +00002017}
2018
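/*
 * Idling a ring means flushing any outstanding GPU writes and then waiting for
 * the seqno of the next (lazily emitted) request; i915_gpu_idle() does this
 * for every ring, optionally retiring the completed requests.
 */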
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08002019static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
Chris Wilsona56ba562010-09-28 10:07:56 +01002020{
Chris Wilson88241782011-01-07 17:09:48 +00002021 int ret;
2022
Chris Wilson395b70b2010-10-28 21:28:46 +01002023 if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
Chris Wilson64193402010-10-24 12:38:05 +01002024 return 0;
2025
Chris Wilson88241782011-01-07 17:09:48 +00002026 if (!list_empty(&ring->gpu_write_list)) {
Chris Wilsondb53a302011-02-03 11:57:46 +00002027 ret = i915_gem_flush_ring(ring,
Chris Wilson0ac74c62010-12-06 14:36:02 +00002028 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
Chris Wilson88241782011-01-07 17:09:48 +00002029 if (ret)
2030 return ret;
2031 }
2032
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08002033 return i915_wait_request(ring, i915_gem_next_request_seqno(ring),
2034 do_retire);
Chris Wilsona56ba562010-09-28 10:07:56 +01002035}
2036
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08002037int i915_gpu_idle(struct drm_device *dev, bool do_retire)
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002038{
2039 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002040 int ret, i;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002041
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002042 /* Flush everything onto the inactive list. */
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002043 for (i = 0; i < I915_NUM_RINGS; i++) {
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08002044 ret = i915_ring_idle(&dev_priv->ring[i], do_retire);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002045 if (ret)
2046 return ret;
2047 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08002048
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01002049 return 0;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002050}
2051
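/*
 * The per-generation fence helpers below pack the same information into the
 * hardware fence registers: the object's GTT range, its stride (encoded as a
 * pitch field), a Y-tiling bit and a valid bit. When @pipelined is non-NULL
 * the register write is emitted on that ring with MI_LOAD_REGISTER_IMM rather
 * than written directly via MMIO.
 */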
Daniel Vetterc6642782010-11-12 13:46:18 +00002052static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
2053 struct intel_ring_buffer *pipelined)
Eric Anholt4e901fd2009-10-26 16:44:17 -07002054{
Chris Wilson05394f32010-11-08 19:18:58 +00002055 struct drm_device *dev = obj->base.dev;
Eric Anholt4e901fd2009-10-26 16:44:17 -07002056 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00002057 u32 size = obj->gtt_space->size;
2058 int regnum = obj->fence_reg;
Eric Anholt4e901fd2009-10-26 16:44:17 -07002059 uint64_t val;
2060
Chris Wilson05394f32010-11-08 19:18:58 +00002061 val = (uint64_t)((obj->gtt_offset + size - 4096) &
Daniel Vetterc6642782010-11-12 13:46:18 +00002062 0xfffff000) << 32;
Chris Wilson05394f32010-11-08 19:18:58 +00002063 val |= obj->gtt_offset & 0xfffff000;
2064 val |= (uint64_t)((obj->stride / 128) - 1) <<
Eric Anholt4e901fd2009-10-26 16:44:17 -07002065 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2066
Chris Wilson05394f32010-11-08 19:18:58 +00002067 if (obj->tiling_mode == I915_TILING_Y)
Eric Anholt4e901fd2009-10-26 16:44:17 -07002068 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2069 val |= I965_FENCE_REG_VALID;
2070
Daniel Vetterc6642782010-11-12 13:46:18 +00002071 if (pipelined) {
2072 int ret = intel_ring_begin(pipelined, 6);
2073 if (ret)
2074 return ret;
2075
2076 intel_ring_emit(pipelined, MI_NOOP);
2077 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2078 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
2079 intel_ring_emit(pipelined, (u32)val);
2080 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
2081 intel_ring_emit(pipelined, (u32)(val >> 32));
2082 intel_ring_advance(pipelined);
2083 } else
2084 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
2085
2086 return 0;
Eric Anholt4e901fd2009-10-26 16:44:17 -07002087}
2088
Daniel Vetterc6642782010-11-12 13:46:18 +00002089static int i965_write_fence_reg(struct drm_i915_gem_object *obj,
2090 struct intel_ring_buffer *pipelined)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002091{
Chris Wilson05394f32010-11-08 19:18:58 +00002092 struct drm_device *dev = obj->base.dev;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002093 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00002094 u32 size = obj->gtt_space->size;
2095 int regnum = obj->fence_reg;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002096 uint64_t val;
2097
Chris Wilson05394f32010-11-08 19:18:58 +00002098 val = (uint64_t)((obj->gtt_offset + size - 4096) &
Jesse Barnesde151cf2008-11-12 10:03:55 -08002099 0xfffff000) << 32;
Chris Wilson05394f32010-11-08 19:18:58 +00002100 val |= obj->gtt_offset & 0xfffff000;
2101 val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2102 if (obj->tiling_mode == I915_TILING_Y)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002103 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2104 val |= I965_FENCE_REG_VALID;
2105
Daniel Vetterc6642782010-11-12 13:46:18 +00002106 if (pipelined) {
2107 int ret = intel_ring_begin(pipelined, 6);
2108 if (ret)
2109 return ret;
2110
2111 intel_ring_emit(pipelined, MI_NOOP);
2112 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2113 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
2114 intel_ring_emit(pipelined, (u32)val);
2115 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
2116 intel_ring_emit(pipelined, (u32)(val >> 32));
2117 intel_ring_advance(pipelined);
2118 } else
2119 I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
2120
2121 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002122}
2123
Daniel Vetterc6642782010-11-12 13:46:18 +00002124static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
2125 struct intel_ring_buffer *pipelined)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002126{
Chris Wilson05394f32010-11-08 19:18:58 +00002127 struct drm_device *dev = obj->base.dev;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002128 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00002129 u32 size = obj->gtt_space->size;
Daniel Vetterc6642782010-11-12 13:46:18 +00002130 u32 fence_reg, val, pitch_val;
Jesse Barnes0f973f22009-01-26 17:10:45 -08002131 int tile_width;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002132
Daniel Vetterc6642782010-11-12 13:46:18 +00002133 if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2134 (size & -size) != size ||
2135 (obj->gtt_offset & (size - 1)),
2136 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2137 obj->gtt_offset, obj->map_and_fenceable, size))
2138 return -EINVAL;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002139
Daniel Vetterc6642782010-11-12 13:46:18 +00002140 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
Jesse Barnes0f973f22009-01-26 17:10:45 -08002141 tile_width = 128;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002142 else
Jesse Barnes0f973f22009-01-26 17:10:45 -08002143 tile_width = 512;
2144
 2145	/* Note: pitch must be a power-of-two multiple of the tile width */
Chris Wilson05394f32010-11-08 19:18:58 +00002146 pitch_val = obj->stride / tile_width;
Jesse Barnes0f973f22009-01-26 17:10:45 -08002147 pitch_val = ffs(pitch_val) - 1;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002148
Chris Wilson05394f32010-11-08 19:18:58 +00002149 val = obj->gtt_offset;
2150 if (obj->tiling_mode == I915_TILING_Y)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002151 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002152 val |= I915_FENCE_SIZE_BITS(size);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002153 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2154 val |= I830_FENCE_REG_VALID;
2155
Chris Wilson05394f32010-11-08 19:18:58 +00002156 fence_reg = obj->fence_reg;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002157 if (fence_reg < 8)
2158 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
Eric Anholtdc529a42009-03-10 22:34:49 -07002159 else
Chris Wilsona00b10c2010-09-24 21:15:47 +01002160 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
Daniel Vetterc6642782010-11-12 13:46:18 +00002161
2162 if (pipelined) {
2163 int ret = intel_ring_begin(pipelined, 4);
2164 if (ret)
2165 return ret;
2166
2167 intel_ring_emit(pipelined, MI_NOOP);
2168 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2169 intel_ring_emit(pipelined, fence_reg);
2170 intel_ring_emit(pipelined, val);
2171 intel_ring_advance(pipelined);
2172 } else
2173 I915_WRITE(fence_reg, val);
2174
2175 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002176}
2177
Daniel Vetterc6642782010-11-12 13:46:18 +00002178static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
2179 struct intel_ring_buffer *pipelined)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002180{
Chris Wilson05394f32010-11-08 19:18:58 +00002181 struct drm_device *dev = obj->base.dev;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002182 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00002183 u32 size = obj->gtt_space->size;
2184 int regnum = obj->fence_reg;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002185 uint32_t val;
2186 uint32_t pitch_val;
2187
Daniel Vetterc6642782010-11-12 13:46:18 +00002188 if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2189 (size & -size) != size ||
2190 (obj->gtt_offset & (size - 1)),
2191 "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2192 obj->gtt_offset, size))
2193 return -EINVAL;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002194
Chris Wilson05394f32010-11-08 19:18:58 +00002195 pitch_val = obj->stride / 128;
Eric Anholte76a16d2009-05-26 17:44:56 -07002196 pitch_val = ffs(pitch_val) - 1;
Eric Anholte76a16d2009-05-26 17:44:56 -07002197
Chris Wilson05394f32010-11-08 19:18:58 +00002198 val = obj->gtt_offset;
2199 if (obj->tiling_mode == I915_TILING_Y)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002200 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
Daniel Vetterc6642782010-11-12 13:46:18 +00002201 val |= I830_FENCE_SIZE_BITS(size);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002202 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2203 val |= I830_FENCE_REG_VALID;
2204
Daniel Vetterc6642782010-11-12 13:46:18 +00002205 if (pipelined) {
2206 int ret = intel_ring_begin(pipelined, 4);
2207 if (ret)
2208 return ret;
2209
2210 intel_ring_emit(pipelined, MI_NOOP);
2211 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2212 intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
2213 intel_ring_emit(pipelined, val);
2214 intel_ring_advance(pipelined);
2215 } else
2216 I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
2217
2218 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002219}
2220
Chris Wilsond9e86c02010-11-10 16:40:20 +00002221static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
2222{
2223 return i915_seqno_passed(ring->get_seqno(ring), seqno);
2224}
2225
2226static int
2227i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
Chris Wilsonce453d82011-02-21 14:43:56 +00002228 struct intel_ring_buffer *pipelined)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002229{
2230 int ret;
2231
2232 if (obj->fenced_gpu_access) {
Chris Wilson88241782011-01-07 17:09:48 +00002233 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
Chris Wilsondb53a302011-02-03 11:57:46 +00002234 ret = i915_gem_flush_ring(obj->last_fenced_ring,
Chris Wilson88241782011-01-07 17:09:48 +00002235 0, obj->base.write_domain);
2236 if (ret)
2237 return ret;
2238 }
Chris Wilsond9e86c02010-11-10 16:40:20 +00002239
2240 obj->fenced_gpu_access = false;
2241 }
2242
2243 if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
2244 if (!ring_passed_seqno(obj->last_fenced_ring,
2245 obj->last_fenced_seqno)) {
Chris Wilsondb53a302011-02-03 11:57:46 +00002246 ret = i915_wait_request(obj->last_fenced_ring,
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08002247 obj->last_fenced_seqno,
2248 true);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002249 if (ret)
2250 return ret;
2251 }
2252
2253 obj->last_fenced_seqno = 0;
2254 obj->last_fenced_ring = NULL;
2255 }
2256
Chris Wilson63256ec2011-01-04 18:42:07 +00002257 /* Ensure that all CPU reads are completed before installing a fence
2258 * and all writes before removing the fence.
2259 */
2260 if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
2261 mb();
2262
Chris Wilsond9e86c02010-11-10 16:40:20 +00002263 return 0;
2264}
2265
2266int
2267i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2268{
2269 int ret;
2270
2271 if (obj->tiling_mode)
2272 i915_gem_release_mmap(obj);
2273
Chris Wilsonce453d82011-02-21 14:43:56 +00002274 ret = i915_gem_object_flush_fence(obj, NULL);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002275 if (ret)
2276 return ret;
2277
2278 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2279 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Chris Wilson1690e1e2011-12-14 13:57:08 +01002280
2281 WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002282 i915_gem_clear_fence_reg(obj->base.dev,
2283 &dev_priv->fence_regs[obj->fence_reg]);
2284
2285 obj->fence_reg = I915_FENCE_REG_NONE;
2286 }
2287
2288 return 0;
2289}
2290
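/*
 * Fence register selection: prefer a completely free register; otherwise steal
 * the least-recently-used register that is not pinned, preferring one whose
 * current owner was last fenced on @pipelined (or on no ring at all) so that
 * stealing it does not force a stall.
 */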
2291static struct drm_i915_fence_reg *
2292i915_find_fence_reg(struct drm_device *dev,
2293 struct intel_ring_buffer *pipelined)
Daniel Vetterae3db242010-02-19 11:51:58 +01002294{
Daniel Vetterae3db242010-02-19 11:51:58 +01002295 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002296 struct drm_i915_fence_reg *reg, *first, *avail;
2297 int i;
Daniel Vetterae3db242010-02-19 11:51:58 +01002298
2299 /* First try to find a free reg */
Chris Wilsond9e86c02010-11-10 16:40:20 +00002300 avail = NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01002301 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2302 reg = &dev_priv->fence_regs[i];
2303 if (!reg->obj)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002304 return reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01002305
Chris Wilson1690e1e2011-12-14 13:57:08 +01002306 if (!reg->pin_count)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002307 avail = reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01002308 }
2309
Chris Wilsond9e86c02010-11-10 16:40:20 +00002310 if (avail == NULL)
2311 return NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01002312
2313 /* None available, try to steal one or wait for a user to finish */
Chris Wilsond9e86c02010-11-10 16:40:20 +00002314 avail = first = NULL;
2315 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
Chris Wilson1690e1e2011-12-14 13:57:08 +01002316 if (reg->pin_count)
Daniel Vetterae3db242010-02-19 11:51:58 +01002317 continue;
2318
Chris Wilsond9e86c02010-11-10 16:40:20 +00002319 if (first == NULL)
2320 first = reg;
2321
2322 if (!pipelined ||
2323 !reg->obj->last_fenced_ring ||
2324 reg->obj->last_fenced_ring == pipelined) {
2325 avail = reg;
2326 break;
2327 }
Daniel Vetterae3db242010-02-19 11:51:58 +01002328 }
2329
Chris Wilsond9e86c02010-11-10 16:40:20 +00002330 if (avail == NULL)
2331 avail = first;
Daniel Vetterae3db242010-02-19 11:51:58 +01002332
Chris Wilsona00b10c2010-09-24 21:15:47 +01002333 return avail;
Daniel Vetterae3db242010-02-19 11:51:58 +01002334}
2335
Jesse Barnesde151cf2008-11-12 10:03:55 -08002336/**
Chris Wilsond9e86c02010-11-10 16:40:20 +00002337 * i915_gem_object_get_fence - set up a fence reg for an object
Jesse Barnesde151cf2008-11-12 10:03:55 -08002338 * @obj: object to map through a fence reg
Chris Wilsond9e86c02010-11-10 16:40:20 +00002339 * @pipelined: ring on which to queue the change, or NULL for CPU access
2340 * @interruptible: must we wait uninterruptibly for the register to retire?
Jesse Barnesde151cf2008-11-12 10:03:55 -08002341 *
2342 * When mapping objects through the GTT, userspace wants to be able to write
2343 * to them without having to worry about swizzling if the object is tiled.
2344 *
2345 * This function walks the fence regs looking for a free one for @obj,
2346 * stealing one if it can't find any.
2347 *
2348 * It then sets up the reg based on the object's properties: address, pitch
2349 * and tiling format.
2350 */
Chris Wilson8c4b8c32009-06-17 22:08:52 +01002351int
Chris Wilsond9e86c02010-11-10 16:40:20 +00002352i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
Chris Wilsonce453d82011-02-21 14:43:56 +00002353 struct intel_ring_buffer *pipelined)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002354{
Chris Wilson05394f32010-11-08 19:18:58 +00002355 struct drm_device *dev = obj->base.dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08002356 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002357 struct drm_i915_fence_reg *reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01002358 int ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002359
Chris Wilson6bda10d2010-12-05 21:04:18 +00002360 /* XXX disable pipelining. There are bugs. Shocking. */
2361 pipelined = NULL;
2362
Chris Wilsond9e86c02010-11-10 16:40:20 +00002363 /* Just update our place in the LRU if our fence is getting reused. */
Chris Wilson05394f32010-11-08 19:18:58 +00002364 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2365 reg = &dev_priv->fence_regs[obj->fence_reg];
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002366 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002367
Chris Wilson29c5a582011-03-17 15:23:22 +00002368 if (obj->tiling_changed) {
2369 ret = i915_gem_object_flush_fence(obj, pipelined);
2370 if (ret)
2371 return ret;
2372
2373 if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
2374 pipelined = NULL;
2375
2376 if (pipelined) {
2377 reg->setup_seqno =
2378 i915_gem_next_request_seqno(pipelined);
2379 obj->last_fenced_seqno = reg->setup_seqno;
2380 obj->last_fenced_ring = pipelined;
2381 }
2382
2383 goto update;
2384 }
Chris Wilsond9e86c02010-11-10 16:40:20 +00002385
2386 if (!pipelined) {
2387 if (reg->setup_seqno) {
2388 if (!ring_passed_seqno(obj->last_fenced_ring,
2389 reg->setup_seqno)) {
Chris Wilsondb53a302011-02-03 11:57:46 +00002390 ret = i915_wait_request(obj->last_fenced_ring,
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08002391 reg->setup_seqno,
2392 true);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002393 if (ret)
2394 return ret;
2395 }
2396
2397 reg->setup_seqno = 0;
2398 }
2399 } else if (obj->last_fenced_ring &&
2400 obj->last_fenced_ring != pipelined) {
Chris Wilsonce453d82011-02-21 14:43:56 +00002401 ret = i915_gem_object_flush_fence(obj, pipelined);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002402 if (ret)
2403 return ret;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002404 }
2405
Eric Anholta09ba7f2009-08-29 12:49:51 -07002406 return 0;
2407 }
2408
Chris Wilsond9e86c02010-11-10 16:40:20 +00002409 reg = i915_find_fence_reg(dev, pipelined);
2410 if (reg == NULL)
Daniel Vetter39965b32011-12-14 13:57:09 +01002411 return -EDEADLK;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002412
Chris Wilsonce453d82011-02-21 14:43:56 +00002413 ret = i915_gem_object_flush_fence(obj, pipelined);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002414 if (ret)
Daniel Vetterae3db242010-02-19 11:51:58 +01002415 return ret;
Chris Wilsonfc7170b2009-02-11 14:26:46 +00002416
Chris Wilsond9e86c02010-11-10 16:40:20 +00002417 if (reg->obj) {
2418 struct drm_i915_gem_object *old = reg->obj;
2419
2420 drm_gem_object_reference(&old->base);
2421
2422 if (old->tiling_mode)
2423 i915_gem_release_mmap(old);
2424
Chris Wilsonce453d82011-02-21 14:43:56 +00002425 ret = i915_gem_object_flush_fence(old, pipelined);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002426 if (ret) {
2427 drm_gem_object_unreference(&old->base);
2428 return ret;
2429 }
2430
2431 if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
2432 pipelined = NULL;
2433
2434 old->fence_reg = I915_FENCE_REG_NONE;
2435 old->last_fenced_ring = pipelined;
2436 old->last_fenced_seqno =
Chris Wilsondb53a302011-02-03 11:57:46 +00002437 pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002438
2439 drm_gem_object_unreference(&old->base);
2440 } else if (obj->last_fenced_seqno == 0)
2441 pipelined = NULL;
Eric Anholta09ba7f2009-08-29 12:49:51 -07002442
Jesse Barnesde151cf2008-11-12 10:03:55 -08002443 reg->obj = obj;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002444 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2445 obj->fence_reg = reg - dev_priv->fence_regs;
2446 obj->last_fenced_ring = pipelined;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002447
Chris Wilsond9e86c02010-11-10 16:40:20 +00002448 reg->setup_seqno =
Chris Wilsondb53a302011-02-03 11:57:46 +00002449 pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002450 obj->last_fenced_seqno = reg->setup_seqno;
2451
2452update:
2453 obj->tiling_changed = false;
Chris Wilsone259bef2010-09-17 00:32:02 +01002454 switch (INTEL_INFO(dev)->gen) {
Eric Anholt25aebfc32011-05-06 13:55:53 -07002455 case 7:
Chris Wilsone259bef2010-09-17 00:32:02 +01002456 case 6:
Daniel Vetterc6642782010-11-12 13:46:18 +00002457 ret = sandybridge_write_fence_reg(obj, pipelined);
Chris Wilsone259bef2010-09-17 00:32:02 +01002458 break;
2459 case 5:
2460 case 4:
Daniel Vetterc6642782010-11-12 13:46:18 +00002461 ret = i965_write_fence_reg(obj, pipelined);
Chris Wilsone259bef2010-09-17 00:32:02 +01002462 break;
2463 case 3:
Daniel Vetterc6642782010-11-12 13:46:18 +00002464 ret = i915_write_fence_reg(obj, pipelined);
Chris Wilsone259bef2010-09-17 00:32:02 +01002465 break;
2466 case 2:
Daniel Vetterc6642782010-11-12 13:46:18 +00002467 ret = i830_write_fence_reg(obj, pipelined);
Chris Wilsone259bef2010-09-17 00:32:02 +01002468 break;
2469 }
Eric Anholtd9ddcb92009-01-27 10:33:49 -08002470
Daniel Vetterc6642782010-11-12 13:46:18 +00002471 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002472}
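/*
 * Illustrative usage sketch (not from the original file; error handling and
 * unpinning omitted): a caller that wants detiled CPU access through a GTT
 * mapping binds the object into the mappable aperture and then claims a
 * fence, roughly:
 *
 *	ret = i915_gem_object_pin(obj, 0, true);
 *	if (ret == 0)
 *		ret = i915_gem_object_get_fence(obj, NULL);
 *
 * A NULL ring requests unpipelined (CPU) access, matching the
 * "disable pipelining" workaround at the top of this function.
 */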
2473
2474/**
2475 * i915_gem_clear_fence_reg - clear out fence register info
 2476 * @dev: DRM device
 * @reg: fence register to clear out
2477 *
2478 * Zeroes out the fence register itself and clears out the associated
Chris Wilson05394f32010-11-08 19:18:58 +00002479 * data structures in dev_priv and obj.
Jesse Barnesde151cf2008-11-12 10:03:55 -08002480 */
2481static void
Chris Wilsond9e86c02010-11-10 16:40:20 +00002482i915_gem_clear_fence_reg(struct drm_device *dev,
2483 struct drm_i915_fence_reg *reg)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002484{
Jesse Barnes79e53942008-11-07 14:24:08 -08002485 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002486 uint32_t fence_reg = reg - dev_priv->fence_regs;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002487
Chris Wilsone259bef2010-09-17 00:32:02 +01002488 switch (INTEL_INFO(dev)->gen) {
Eric Anholt25aebfc32011-05-06 13:55:53 -07002489 case 7:
Chris Wilsone259bef2010-09-17 00:32:02 +01002490 case 6:
Chris Wilsond9e86c02010-11-10 16:40:20 +00002491 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
Chris Wilsone259bef2010-09-17 00:32:02 +01002492 break;
2493 case 5:
2494 case 4:
Chris Wilsond9e86c02010-11-10 16:40:20 +00002495 I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
Chris Wilsone259bef2010-09-17 00:32:02 +01002496 break;
2497 case 3:
Chris Wilsond9e86c02010-11-10 16:40:20 +00002498 if (fence_reg >= 8)
2499 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
Eric Anholtdc529a42009-03-10 22:34:49 -07002500 else
Chris Wilsone259bef2010-09-17 00:32:02 +01002501 case 2:
Chris Wilsond9e86c02010-11-10 16:40:20 +00002502 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
Eric Anholtdc529a42009-03-10 22:34:49 -07002503
2504 I915_WRITE(fence_reg, 0);
Chris Wilsone259bef2010-09-17 00:32:02 +01002505 break;
Eric Anholtdc529a42009-03-10 22:34:49 -07002506 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08002507
Daniel Vetter007cc8a2010-04-28 11:02:31 +02002508 list_del_init(&reg->lru_list);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002509 reg->obj = NULL;
2510 reg->setup_seqno = 0;
Chris Wilson1690e1e2011-12-14 13:57:08 +01002511 reg->pin_count = 0;
Chris Wilson52dc7d32009-06-06 09:46:01 +01002512}
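/*
 * Worked example of the register layout handled above: from gen4 onwards the
 * fence registers are 64 bits wide, so fence N is cleared with one 64-bit
 * write to FENCE_REG_965_0 + N*8 (FENCE_REG_SANDYBRIDGE_0 + N*8 on gen6/7).
 * Gen2/3 registers are 32 bits wide: fences 0-7 sit at FENCE_REG_830_0 + N*4,
 * while on 945-class hardware fences 8-15 sit at FENCE_REG_945_8 + (N-8)*4,
 * e.g. fence 10 is cleared at FENCE_REG_945_8 + 8.
 */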
2513
2514/**
Eric Anholt673a3942008-07-30 12:06:12 -07002515 * Finds free space in the GTT aperture and binds the object there.
2516 */
2517static int
Chris Wilson05394f32010-11-08 19:18:58 +00002518i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
Daniel Vetter920afa72010-09-16 17:54:23 +02002519 unsigned alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01002520 bool map_and_fenceable)
Eric Anholt673a3942008-07-30 12:06:12 -07002521{
Chris Wilson05394f32010-11-08 19:18:58 +00002522 struct drm_device *dev = obj->base.dev;
Eric Anholt673a3942008-07-30 12:06:12 -07002523 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07002524 struct drm_mm_node *free_space;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002525 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
Daniel Vetter5e783302010-11-14 22:32:36 +01002526 u32 size, fence_size, fence_alignment, unfenced_alignment;
Daniel Vetter75e9e912010-11-04 17:11:09 +01002527 bool mappable, fenceable;
Chris Wilson07f73f62009-09-14 16:50:30 +01002528 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002529
Chris Wilson05394f32010-11-08 19:18:58 +00002530 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilson3ef94da2009-09-14 16:50:29 +01002531 DRM_ERROR("Attempting to bind a purgeable object\n");
2532 return -EINVAL;
2533 }
2534
Chris Wilsone28f8712011-07-18 13:11:49 -07002535 fence_size = i915_gem_get_gtt_size(dev,
2536 obj->base.size,
2537 obj->tiling_mode);
2538 fence_alignment = i915_gem_get_gtt_alignment(dev,
2539 obj->base.size,
2540 obj->tiling_mode);
2541 unfenced_alignment =
2542 i915_gem_get_unfenced_gtt_alignment(dev,
2543 obj->base.size,
2544 obj->tiling_mode);
Chris Wilsona00b10c2010-09-24 21:15:47 +01002545
Eric Anholt673a3942008-07-30 12:06:12 -07002546 if (alignment == 0)
Daniel Vetter5e783302010-11-14 22:32:36 +01002547 alignment = map_and_fenceable ? fence_alignment :
2548 unfenced_alignment;
Daniel Vetter75e9e912010-11-04 17:11:09 +01002549 if (map_and_fenceable && alignment & (fence_alignment - 1)) {
Eric Anholt673a3942008-07-30 12:06:12 -07002550 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2551 return -EINVAL;
2552 }
2553
Chris Wilson05394f32010-11-08 19:18:58 +00002554 size = map_and_fenceable ? fence_size : obj->base.size;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002555
Chris Wilson654fc602010-05-27 13:18:21 +01002556 /* If the object is bigger than the entire aperture, reject it early
2557 * before evicting everything in a vain attempt to find space.
2558 */
Chris Wilson05394f32010-11-08 19:18:58 +00002559 if (obj->base.size >
Daniel Vetter75e9e912010-11-04 17:11:09 +01002560 (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
Chris Wilson654fc602010-05-27 13:18:21 +01002561 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2562 return -E2BIG;
2563 }
2564
Eric Anholt673a3942008-07-30 12:06:12 -07002565 search_free:
Daniel Vetter75e9e912010-11-04 17:11:09 +01002566 if (map_and_fenceable)
Daniel Vetter920afa72010-09-16 17:54:23 +02002567 free_space =
2568 drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
Chris Wilsona00b10c2010-09-24 21:15:47 +01002569 size, alignment, 0,
Daniel Vetter920afa72010-09-16 17:54:23 +02002570 dev_priv->mm.gtt_mappable_end,
2571 0);
2572 else
2573 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
Chris Wilsona00b10c2010-09-24 21:15:47 +01002574 size, alignment, 0);
Daniel Vetter920afa72010-09-16 17:54:23 +02002575
2576 if (free_space != NULL) {
Daniel Vetter75e9e912010-11-04 17:11:09 +01002577 if (map_and_fenceable)
Chris Wilson05394f32010-11-08 19:18:58 +00002578 obj->gtt_space =
Daniel Vetter920afa72010-09-16 17:54:23 +02002579 drm_mm_get_block_range_generic(free_space,
Chris Wilsona00b10c2010-09-24 21:15:47 +01002580 size, alignment, 0,
Daniel Vetter920afa72010-09-16 17:54:23 +02002581 dev_priv->mm.gtt_mappable_end,
2582 0);
2583 else
Chris Wilson05394f32010-11-08 19:18:58 +00002584 obj->gtt_space =
Chris Wilsona00b10c2010-09-24 21:15:47 +01002585 drm_mm_get_block(free_space, size, alignment);
Daniel Vetter920afa72010-09-16 17:54:23 +02002586 }
Chris Wilson05394f32010-11-08 19:18:58 +00002587 if (obj->gtt_space == NULL) {
Eric Anholt673a3942008-07-30 12:06:12 -07002588 /* If the gtt is empty and we're still having trouble
2589 * fitting our object in, we're out of memory.
2590 */
Daniel Vetter75e9e912010-11-04 17:11:09 +01002591 ret = i915_gem_evict_something(dev, size, alignment,
2592 map_and_fenceable);
Chris Wilson97311292009-09-21 00:22:34 +01002593 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07002594 return ret;
Chris Wilson97311292009-09-21 00:22:34 +01002595
Eric Anholt673a3942008-07-30 12:06:12 -07002596 goto search_free;
2597 }
2598
Chris Wilsone5281cc2010-10-28 13:45:36 +01002599 ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
Eric Anholt673a3942008-07-30 12:06:12 -07002600 if (ret) {
Chris Wilson05394f32010-11-08 19:18:58 +00002601 drm_mm_put_block(obj->gtt_space);
2602 obj->gtt_space = NULL;
Chris Wilson07f73f62009-09-14 16:50:30 +01002603
2604 if (ret == -ENOMEM) {
Chris Wilson809b6332011-01-10 17:33:15 +00002605 /* first try to reclaim some memory by clearing the GTT */
2606 ret = i915_gem_evict_everything(dev, false);
Chris Wilson07f73f62009-09-14 16:50:30 +01002607 if (ret) {
Chris Wilson07f73f62009-09-14 16:50:30 +01002608 /* now try to shrink everyone else */
Chris Wilson4bdadb92010-01-27 13:36:32 +00002609 if (gfpmask) {
2610 gfpmask = 0;
2611 goto search_free;
Chris Wilson07f73f62009-09-14 16:50:30 +01002612 }
2613
Chris Wilson809b6332011-01-10 17:33:15 +00002614 return -ENOMEM;
Chris Wilson07f73f62009-09-14 16:50:30 +01002615 }
2616
2617 goto search_free;
2618 }
2619
Eric Anholt673a3942008-07-30 12:06:12 -07002620 return ret;
2621 }
2622
Daniel Vetter74163902012-02-15 23:50:21 +01002623 ret = i915_gem_gtt_prepare_object(obj);
Daniel Vetter7c2e6fd2010-11-06 10:10:47 +01002624 if (ret) {
Chris Wilsone5281cc2010-10-28 13:45:36 +01002625 i915_gem_object_put_pages_gtt(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00002626 drm_mm_put_block(obj->gtt_space);
2627 obj->gtt_space = NULL;
Chris Wilson07f73f62009-09-14 16:50:30 +01002628
Chris Wilson809b6332011-01-10 17:33:15 +00002629 if (i915_gem_evict_everything(dev, false))
Chris Wilson07f73f62009-09-14 16:50:30 +01002630 return ret;
Chris Wilson07f73f62009-09-14 16:50:30 +01002631
2632 goto search_free;
Eric Anholt673a3942008-07-30 12:06:12 -07002633 }
Daniel Vetter0ebb9822012-02-15 23:50:24 +01002634
2635 if (!dev_priv->mm.aliasing_ppgtt)
2636 i915_gem_gtt_bind_object(obj, obj->cache_level);
Eric Anholt673a3942008-07-30 12:06:12 -07002637
Chris Wilson6299f992010-11-24 12:23:44 +00002638 list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
Chris Wilson05394f32010-11-08 19:18:58 +00002639 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
Chris Wilsonbf1a1092010-08-07 11:01:20 +01002640
Eric Anholt673a3942008-07-30 12:06:12 -07002641 /* Assert that the object is not currently in any GPU domain. As it
2642 * wasn't in the GTT, there shouldn't be any way it could have been in
2643 * a GPU cache
2644 */
Chris Wilson05394f32010-11-08 19:18:58 +00002645 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2646 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
Eric Anholt673a3942008-07-30 12:06:12 -07002647
Chris Wilson6299f992010-11-24 12:23:44 +00002648 obj->gtt_offset = obj->gtt_space->start;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002649
Daniel Vetter75e9e912010-11-04 17:11:09 +01002650 fenceable =
Chris Wilson05394f32010-11-08 19:18:58 +00002651 obj->gtt_space->size == fence_size &&
Akshay Joshi0206e352011-08-16 15:34:10 -04002652 (obj->gtt_space->start & (fence_alignment - 1)) == 0;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002653
Daniel Vetter75e9e912010-11-04 17:11:09 +01002654 mappable =
Chris Wilson05394f32010-11-08 19:18:58 +00002655 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002656
Chris Wilson05394f32010-11-08 19:18:58 +00002657 obj->map_and_fenceable = mappable && fenceable;
Daniel Vetter75e9e912010-11-04 17:11:09 +01002658
Chris Wilsondb53a302011-02-03 11:57:46 +00002659 trace_i915_gem_object_bind(obj, map_and_fenceable);
Eric Anholt673a3942008-07-30 12:06:12 -07002660 return 0;
2661}
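/*
 * Summary of the fallback ladder above (descriptive note, not original text):
 * if drm_mm has no hole of the requested size/alignment, just enough is
 * evicted with i915_gem_evict_something() and the search retried; if the
 * backing pages then fail to allocate with -ENOMEM, the entire GTT is cleared
 * via i915_gem_evict_everything() and the search retried again, and as a last
 * resort the __GFP_NORETRY | __GFP_NOWARN mask is dropped before finally
 * returning -ENOMEM.
 */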
2662
2663void
Chris Wilson05394f32010-11-08 19:18:58 +00002664i915_gem_clflush_object(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07002665{
Eric Anholt673a3942008-07-30 12:06:12 -07002666 /* If we don't have a page list set up, then we're not pinned
2667 * to GPU, and we can ignore the cache flush because it'll happen
2668 * again at bind time.
2669 */
Chris Wilson05394f32010-11-08 19:18:58 +00002670 if (obj->pages == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07002671 return;
2672
Chris Wilson9c23f7f2011-03-29 16:59:52 -07002673 /* If the GPU is snooping the contents of the CPU cache,
2674 * we do not need to manually clear the CPU cache lines. However,
2675 * the caches are only snooped when the render cache is
2676 * flushed/invalidated. As we always have to emit invalidations
2677 * and flushes when moving into and out of the RENDER domain, correct
2678 * snooping behaviour occurs naturally as the result of our domain
2679 * tracking.
2680 */
2681 if (obj->cache_level != I915_CACHE_NONE)
2682 return;
2683
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002684 trace_i915_gem_object_clflush(obj);
Eric Anholtcfa16a02009-05-26 18:46:16 -07002685
Chris Wilson05394f32010-11-08 19:18:58 +00002686 drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
Eric Anholt673a3942008-07-30 12:06:12 -07002687}
2688
Eric Anholte47c68e2008-11-14 13:35:19 -08002689/** Flushes any GPU write domain for the object if it's dirty. */
Chris Wilson88241782011-01-07 17:09:48 +00002690static int
Chris Wilson3619df02010-11-28 15:37:17 +00002691i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08002692{
Chris Wilson05394f32010-11-08 19:18:58 +00002693 if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
Chris Wilson88241782011-01-07 17:09:48 +00002694 return 0;
Eric Anholte47c68e2008-11-14 13:35:19 -08002695
2696 /* Queue the GPU write cache flushing we need. */
Chris Wilsondb53a302011-02-03 11:57:46 +00002697 return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002698}
2699
2700/** Flushes the GTT write domain for the object if it's dirty. */
2701static void
Chris Wilson05394f32010-11-08 19:18:58 +00002702i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08002703{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002704 uint32_t old_write_domain;
2705
Chris Wilson05394f32010-11-08 19:18:58 +00002706 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
Eric Anholte47c68e2008-11-14 13:35:19 -08002707 return;
2708
Chris Wilson63256ec2011-01-04 18:42:07 +00002709 /* No actual flushing is required for the GTT write domain. Writes
Eric Anholte47c68e2008-11-14 13:35:19 -08002710 * to it immediately go to main memory as far as we know, so there's
2711 * no chipset flush. It also doesn't land in render cache.
Chris Wilson63256ec2011-01-04 18:42:07 +00002712 *
2713 * However, we do have to enforce the order so that all writes through
2714 * the GTT land before any writes to the device, such as updates to
2715 * the GATT itself.
Eric Anholte47c68e2008-11-14 13:35:19 -08002716 */
Chris Wilson63256ec2011-01-04 18:42:07 +00002717 wmb();
2718
Chris Wilson05394f32010-11-08 19:18:58 +00002719 old_write_domain = obj->base.write_domain;
2720 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002721
2722 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00002723 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002724 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002725}
2726
2727/** Flushes the CPU write domain for the object if it's dirty. */
2728static void
Chris Wilson05394f32010-11-08 19:18:58 +00002729i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08002730{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002731 uint32_t old_write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002732
Chris Wilson05394f32010-11-08 19:18:58 +00002733 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
Eric Anholte47c68e2008-11-14 13:35:19 -08002734 return;
2735
2736 i915_gem_clflush_object(obj);
Daniel Vetter40ce6572010-11-05 18:12:18 +01002737 intel_gtt_chipset_flush();
Chris Wilson05394f32010-11-08 19:18:58 +00002738 old_write_domain = obj->base.write_domain;
2739 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002740
2741 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00002742 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002743 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002744}
2745
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002746/**
2747 * Moves a single object to the GTT read, and possibly write domain.
2748 *
2749 * This function returns when the move is complete, including waiting on
2750 * flushes to occur.
2751 */
Jesse Barnes79e53942008-11-07 14:24:08 -08002752int
Chris Wilson20217462010-11-23 15:26:33 +00002753i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002754{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002755 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08002756 int ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002757
Eric Anholt02354392008-11-26 13:58:13 -08002758 /* Not valid to be called on unbound objects. */
Chris Wilson05394f32010-11-08 19:18:58 +00002759 if (obj->gtt_space == NULL)
Eric Anholt02354392008-11-26 13:58:13 -08002760 return -EINVAL;
2761
Chris Wilson8d7e3de2011-02-07 15:23:02 +00002762 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
2763 return 0;
2764
Chris Wilson88241782011-01-07 17:09:48 +00002765 ret = i915_gem_object_flush_gpu_write_domain(obj);
2766 if (ret)
2767 return ret;
2768
Chris Wilson87ca9c82010-12-02 09:42:56 +00002769 if (obj->pending_gpu_write || write) {
Chris Wilsonce453d82011-02-21 14:43:56 +00002770 ret = i915_gem_object_wait_rendering(obj);
Chris Wilson87ca9c82010-12-02 09:42:56 +00002771 if (ret)
2772 return ret;
2773 }
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002774
Chris Wilson72133422010-09-13 23:56:38 +01002775 i915_gem_object_flush_cpu_write_domain(obj);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002776
Chris Wilson05394f32010-11-08 19:18:58 +00002777 old_write_domain = obj->base.write_domain;
2778 old_read_domains = obj->base.read_domains;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002779
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002780 /* It should now be out of any other write domains, and we can update
2781 * the domain values for our changes.
2782 */
Chris Wilson05394f32010-11-08 19:18:58 +00002783 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2784 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08002785 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00002786 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
2787 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
2788 obj->dirty = 1;
Eric Anholte47c68e2008-11-14 13:35:19 -08002789 }
2790
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002791 trace_i915_gem_object_change_domain(obj,
2792 old_read_domains,
2793 old_write_domain);
2794
Eric Anholte47c68e2008-11-14 13:35:19 -08002795 return 0;
2796}
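/*
 * Illustrative sketch (not part of the original file): a write-through-the-GTT
 * path built on the helpers here would look roughly like
 *
 *	ret = i915_gem_object_pin(obj, 0, true);
 *	if (ret == 0)
 *		ret = i915_gem_object_set_to_gtt_domain(obj, true);
 *	... write through the aperture mapping ...
 *	i915_gem_object_unpin(obj);
 *
 * where write=true marks the object dirty and moves its write domain to
 * I915_GEM_DOMAIN_GTT, as done above.
 */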
2797
Chris Wilsone4ffd172011-04-04 09:44:39 +01002798int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
2799 enum i915_cache_level cache_level)
2800{
Daniel Vetter7bddb012012-02-09 17:15:47 +01002801 struct drm_device *dev = obj->base.dev;
2802 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsone4ffd172011-04-04 09:44:39 +01002803 int ret;
2804
2805 if (obj->cache_level == cache_level)
2806 return 0;
2807
2808 if (obj->pin_count) {
2809 DRM_DEBUG("can not change the cache level of pinned objects\n");
2810 return -EBUSY;
2811 }
2812
2813 if (obj->gtt_space) {
2814 ret = i915_gem_object_finish_gpu(obj);
2815 if (ret)
2816 return ret;
2817
2818 i915_gem_object_finish_gtt(obj);
2819
2820 /* Before SandyBridge, you could not use tiling or fence
2821 * registers with snooped memory, so relinquish any fences
2822 * currently pointing to our region in the aperture.
2823 */
2824 if (INTEL_INFO(obj->base.dev)->gen < 6) {
2825 ret = i915_gem_object_put_fence(obj);
2826 if (ret)
2827 return ret;
2828 }
2829
Daniel Vetter74898d72012-02-15 23:50:22 +01002830 if (obj->has_global_gtt_mapping)
2831 i915_gem_gtt_bind_object(obj, cache_level);
Daniel Vetter7bddb012012-02-09 17:15:47 +01002832 if (obj->has_aliasing_ppgtt_mapping)
2833 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
2834 obj, cache_level);
Chris Wilsone4ffd172011-04-04 09:44:39 +01002835 }
2836
2837 if (cache_level == I915_CACHE_NONE) {
2838 u32 old_read_domains, old_write_domain;
2839
2840 /* If we're coming from LLC cached, then we haven't
2841 * actually been tracking whether the data is in the
2842 * CPU cache or not, since we only allow one bit set
2843 * in obj->write_domain and have been skipping the clflushes.
2844 * Just set it to the CPU cache for now.
2845 */
2846 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
2847 WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
2848
2849 old_read_domains = obj->base.read_domains;
2850 old_write_domain = obj->base.write_domain;
2851
2852 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
2853 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2854
2855 trace_i915_gem_object_change_domain(obj,
2856 old_read_domains,
2857 old_write_domain);
2858 }
2859
2860 obj->cache_level = cache_level;
2861 return 0;
2862}
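/*
 * Note: one caller of this helper visible below is the scanout path,
 * i915_gem_object_pin_to_display_plane(), which forces I915_CACHE_NONE
 * because the display engine does not snoop the LLC.
 */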
2863
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002864/*
Chris Wilson2da3b9b2011-04-14 09:41:17 +01002865 * Prepare buffer for display plane (scanout, cursors, etc).
2866 * Can be called from an uninterruptible phase (modesetting) and allows
2867 * any flushes to be pipelined (for pageflips).
2868 *
2869 * For the display plane, we want to be in the GTT but out of any write
2870 * domains. So in many ways this looks like set_to_gtt_domain() apart from the
2871 * ability to pipeline the waits, pinning and any additional subtleties
2872 * that may differentiate the display plane from ordinary buffers.
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002873 */
2874int
Chris Wilson2da3b9b2011-04-14 09:41:17 +01002875i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
2876 u32 alignment,
Chris Wilson919926a2010-11-12 13:42:53 +00002877 struct intel_ring_buffer *pipelined)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002878{
Chris Wilson2da3b9b2011-04-14 09:41:17 +01002879 u32 old_read_domains, old_write_domain;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002880 int ret;
2881
Chris Wilson88241782011-01-07 17:09:48 +00002882 ret = i915_gem_object_flush_gpu_write_domain(obj);
2883 if (ret)
2884 return ret;
2885
Chris Wilson0be73282010-12-06 14:36:27 +00002886 if (pipelined != obj->ring) {
Chris Wilsonce453d82011-02-21 14:43:56 +00002887 ret = i915_gem_object_wait_rendering(obj);
Keith Packardf0b69ef2011-07-19 16:21:40 -07002888 if (ret == -ERESTARTSYS)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002889 return ret;
2890 }
2891
Eric Anholta7ef0642011-03-29 16:59:54 -07002892 /* The display engine is not coherent with the LLC cache on gen6. As
2893 * a result, we make sure that the pinning that is about to occur is
2894 * done with uncached PTEs. This is lowest common denominator for all
2895 * chipsets.
2896 *
2897 * However for gen6+, we could do better by using the GFDT bit instead
2898 * of uncaching, which would allow us to flush all the LLC-cached data
2899 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
2900 */
2901 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
2902 if (ret)
2903 return ret;
2904
Chris Wilson2da3b9b2011-04-14 09:41:17 +01002905 /* As the user may map the buffer once pinned in the display plane
2906 * (e.g. libkms for the bootup splash), we have to ensure that we
2907 * always use map_and_fenceable for all scanout buffers.
2908 */
2909 ret = i915_gem_object_pin(obj, alignment, true);
2910 if (ret)
2911 return ret;
2912
Chris Wilsonb118c1e2010-05-27 13:18:14 +01002913 i915_gem_object_flush_cpu_write_domain(obj);
2914
Chris Wilson2da3b9b2011-04-14 09:41:17 +01002915 old_write_domain = obj->base.write_domain;
Chris Wilson05394f32010-11-08 19:18:58 +00002916 old_read_domains = obj->base.read_domains;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01002917
2918 /* It should now be out of any other write domains, and we can update
2919 * the domain values for our changes.
2920 */
2921 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
Chris Wilson05394f32010-11-08 19:18:58 +00002922 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002923
2924 trace_i915_gem_object_change_domain(obj,
2925 old_read_domains,
Chris Wilson2da3b9b2011-04-14 09:41:17 +01002926 old_write_domain);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002927
2928 return 0;
2929}
2930
Chris Wilson85345512010-11-13 09:49:11 +00002931int
Chris Wilsona8198ee2011-04-13 22:04:09 +01002932i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
Chris Wilson85345512010-11-13 09:49:11 +00002933{
Chris Wilson88241782011-01-07 17:09:48 +00002934 int ret;
2935
Chris Wilsona8198ee2011-04-13 22:04:09 +01002936 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
Chris Wilson85345512010-11-13 09:49:11 +00002937 return 0;
2938
Chris Wilson88241782011-01-07 17:09:48 +00002939 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
Chris Wilsondb53a302011-02-03 11:57:46 +00002940 ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
Chris Wilson88241782011-01-07 17:09:48 +00002941 if (ret)
2942 return ret;
2943 }
Chris Wilson85345512010-11-13 09:49:11 +00002944
Chris Wilsonc501ae72011-12-14 13:57:23 +01002945 ret = i915_gem_object_wait_rendering(obj);
2946 if (ret)
2947 return ret;
2948
Chris Wilsona8198ee2011-04-13 22:04:09 +01002949 /* Ensure that we invalidate the GPU's caches and TLBs. */
2950 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
Chris Wilsonc501ae72011-12-14 13:57:23 +01002951 return 0;
Chris Wilson85345512010-11-13 09:49:11 +00002952}
2953
Eric Anholte47c68e2008-11-14 13:35:19 -08002954/**
2955 * Moves a single object to the CPU read, and possibly write domain.
2956 *
2957 * This function returns when the move is complete, including waiting on
2958 * flushes to occur.
2959 */
Chris Wilsondabdfe02012-03-26 10:10:27 +02002960int
Chris Wilson919926a2010-11-12 13:42:53 +00002961i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholte47c68e2008-11-14 13:35:19 -08002962{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002963 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08002964 int ret;
2965
Chris Wilson8d7e3de2011-02-07 15:23:02 +00002966 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
2967 return 0;
2968
Chris Wilson88241782011-01-07 17:09:48 +00002969 ret = i915_gem_object_flush_gpu_write_domain(obj);
2970 if (ret)
2971 return ret;
2972
Chris Wilsonce453d82011-02-21 14:43:56 +00002973 ret = i915_gem_object_wait_rendering(obj);
Daniel Vetterde18a292010-11-27 22:30:41 +01002974 if (ret)
Eric Anholte47c68e2008-11-14 13:35:19 -08002975 return ret;
2976
2977 i915_gem_object_flush_gtt_write_domain(obj);
2978
2979 /* If we have a partially-valid cache of the object in the CPU,
2980 * finish invalidating it and free the per-page flags.
2981 */
2982 i915_gem_object_set_to_full_cpu_read_domain(obj);
2983
Chris Wilson05394f32010-11-08 19:18:58 +00002984 old_write_domain = obj->base.write_domain;
2985 old_read_domains = obj->base.read_domains;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002986
Eric Anholte47c68e2008-11-14 13:35:19 -08002987 /* Flush the CPU cache if it's still invalid. */
Chris Wilson05394f32010-11-08 19:18:58 +00002988 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
Eric Anholte47c68e2008-11-14 13:35:19 -08002989 i915_gem_clflush_object(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08002990
Chris Wilson05394f32010-11-08 19:18:58 +00002991 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08002992 }
2993
2994 /* It should now be out of any other write domains, and we can update
2995 * the domain values for our changes.
2996 */
Chris Wilson05394f32010-11-08 19:18:58 +00002997 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
Eric Anholte47c68e2008-11-14 13:35:19 -08002998
2999 /* If we're writing through the CPU, then the GPU read domains will
3000 * need to be invalidated at next use.
3001 */
3002 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00003003 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3004 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003005 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003006
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003007 trace_i915_gem_object_change_domain(obj,
3008 old_read_domains,
3009 old_write_domain);
3010
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003011 return 0;
3012}
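/*
 * Descriptive note (not original text): this is the mirror image of
 * i915_gem_object_set_to_gtt_domain() above -- instead of a wmb() it relies
 * on i915_gem_clflush_object() to drop stale CPU cache lines, and a write
 * here leaves the GPU domains to be re-invalidated on the next rendering use.
 */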
3013
Eric Anholt673a3942008-07-30 12:06:12 -07003014/**
Eric Anholte47c68e2008-11-14 13:35:19 -08003015 * Moves the object from a partially CPU read to a full one.
Eric Anholt673a3942008-07-30 12:06:12 -07003016 *
Eric Anholte47c68e2008-11-14 13:35:19 -08003017 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
3018 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
3019 */
3020static void
Chris Wilson05394f32010-11-08 19:18:58 +00003021i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08003022{
Chris Wilson05394f32010-11-08 19:18:58 +00003023 if (!obj->page_cpu_valid)
Eric Anholte47c68e2008-11-14 13:35:19 -08003024 return;
3025
3026 /* If we're partially in the CPU read domain, finish moving it in.
3027 */
Chris Wilson05394f32010-11-08 19:18:58 +00003028 if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
Eric Anholte47c68e2008-11-14 13:35:19 -08003029 int i;
3030
Chris Wilson05394f32010-11-08 19:18:58 +00003031 for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
3032 if (obj->page_cpu_valid[i])
Eric Anholte47c68e2008-11-14 13:35:19 -08003033 continue;
Chris Wilson05394f32010-11-08 19:18:58 +00003034 drm_clflush_pages(obj->pages + i, 1);
Eric Anholte47c68e2008-11-14 13:35:19 -08003035 }
Eric Anholte47c68e2008-11-14 13:35:19 -08003036 }
3037
3038 /* Free the page_cpu_valid mappings which are now stale, whether
3039 * or not we've got I915_GEM_DOMAIN_CPU.
3040 */
Chris Wilson05394f32010-11-08 19:18:58 +00003041 kfree(obj->page_cpu_valid);
3042 obj->page_cpu_valid = NULL;
Eric Anholte47c68e2008-11-14 13:35:19 -08003043}
3044
3045/**
3046 * Set the CPU read domain on a range of the object.
3047 *
3048 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
3049 * not entirely valid. The page_cpu_valid member of the object flags which
3050 * pages have been flushed, and will be respected by
3051 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
3052 * of the whole object.
3053 *
3054 * This function returns when the move is complete, including waiting on
3055 * flushes to occur.
Eric Anholt673a3942008-07-30 12:06:12 -07003056 */
3057static int
Chris Wilson05394f32010-11-08 19:18:58 +00003058i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
Eric Anholte47c68e2008-11-14 13:35:19 -08003059 uint64_t offset, uint64_t size)
Eric Anholt673a3942008-07-30 12:06:12 -07003060{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003061 uint32_t old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003062 int i, ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003063
Chris Wilson05394f32010-11-08 19:18:58 +00003064 if (offset == 0 && size == obj->base.size)
Eric Anholte47c68e2008-11-14 13:35:19 -08003065 return i915_gem_object_set_to_cpu_domain(obj, 0);
3066
Chris Wilson88241782011-01-07 17:09:48 +00003067 ret = i915_gem_object_flush_gpu_write_domain(obj);
3068 if (ret)
3069 return ret;
3070
Chris Wilsonce453d82011-02-21 14:43:56 +00003071 ret = i915_gem_object_wait_rendering(obj);
Daniel Vetterde18a292010-11-27 22:30:41 +01003072 if (ret)
Eric Anholte47c68e2008-11-14 13:35:19 -08003073 return ret;
Daniel Vetterde18a292010-11-27 22:30:41 +01003074
Eric Anholte47c68e2008-11-14 13:35:19 -08003075 i915_gem_object_flush_gtt_write_domain(obj);
3076
3077 /* If we're already fully in the CPU read domain, we're done. */
Chris Wilson05394f32010-11-08 19:18:58 +00003078 if (obj->page_cpu_valid == NULL &&
3079 (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
Eric Anholt673a3942008-07-30 12:06:12 -07003080 return 0;
3081
Eric Anholte47c68e2008-11-14 13:35:19 -08003082 /* Otherwise, create/clear the per-page CPU read domain flag if we're
3083 * newly adding I915_GEM_DOMAIN_CPU
3084 */
Chris Wilson05394f32010-11-08 19:18:58 +00003085 if (obj->page_cpu_valid == NULL) {
3086 obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE,
3087 GFP_KERNEL);
3088 if (obj->page_cpu_valid == NULL)
Eric Anholte47c68e2008-11-14 13:35:19 -08003089 return -ENOMEM;
Chris Wilson05394f32010-11-08 19:18:58 +00003090 } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
3091 memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
Eric Anholt673a3942008-07-30 12:06:12 -07003092
3093 /* Flush the cache on any pages that are still invalid from the CPU's
3094 * perspective.
3095 */
Eric Anholte47c68e2008-11-14 13:35:19 -08003096 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
3097 i++) {
Chris Wilson05394f32010-11-08 19:18:58 +00003098 if (obj->page_cpu_valid[i])
Eric Anholt673a3942008-07-30 12:06:12 -07003099 continue;
3100
Chris Wilson05394f32010-11-08 19:18:58 +00003101 drm_clflush_pages(obj->pages + i, 1);
Eric Anholt673a3942008-07-30 12:06:12 -07003102
Chris Wilson05394f32010-11-08 19:18:58 +00003103 obj->page_cpu_valid[i] = 1;
Eric Anholt673a3942008-07-30 12:06:12 -07003104 }
3105
Eric Anholte47c68e2008-11-14 13:35:19 -08003106 /* It should now be out of any other write domains, and we can update
3107 * the domain values for our changes.
3108 */
Chris Wilson05394f32010-11-08 19:18:58 +00003109 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
Eric Anholte47c68e2008-11-14 13:35:19 -08003110
Chris Wilson05394f32010-11-08 19:18:58 +00003111 old_read_domains = obj->base.read_domains;
3112 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003113
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003114 trace_i915_gem_object_change_domain(obj,
3115 old_read_domains,
Chris Wilson05394f32010-11-08 19:18:58 +00003116 obj->base.write_domain);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003117
Eric Anholt673a3942008-07-30 12:06:12 -07003118 return 0;
3119}
3120
Eric Anholt673a3942008-07-30 12:06:12 -07003121/* Throttle our rendering by waiting until the ring has completed our requests
3122 * emitted over 20 msec ago.
3123 *
Eric Anholtb9624422009-06-03 07:27:35 +00003124 * Note that if we were to use the current jiffies each time around the loop,
3125 * we wouldn't escape the function with any frames outstanding if the time to
3126 * render a frame was over 20ms.
3127 *
Eric Anholt673a3942008-07-30 12:06:12 -07003128 * This should get us reasonable parallelism between CPU and GPU but also
3129 * relatively low latency when blocking on a particular request to finish.
3130 */
3131static int
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003132i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003133{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003134 struct drm_i915_private *dev_priv = dev->dev_private;
3135 struct drm_i915_file_private *file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00003136 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003137 struct drm_i915_gem_request *request;
3138 struct intel_ring_buffer *ring = NULL;
3139 u32 seqno = 0;
3140 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003141
Chris Wilsone110e8d2011-01-26 15:39:14 +00003142 if (atomic_read(&dev_priv->mm.wedged))
3143 return -EIO;
3144
Chris Wilson1c255952010-09-26 11:03:27 +01003145 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003146 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
Eric Anholtb9624422009-06-03 07:27:35 +00003147 if (time_after_eq(request->emitted_jiffies, recent_enough))
3148 break;
3149
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003150 ring = request->ring;
3151 seqno = request->seqno;
Eric Anholtb9624422009-06-03 07:27:35 +00003152 }
Chris Wilson1c255952010-09-26 11:03:27 +01003153 spin_unlock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003154
3155 if (seqno == 0)
3156 return 0;
3157
3158 ret = 0;
Chris Wilson78501ea2010-10-27 12:18:21 +01003159 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003160 /* And wait for the seqno passing without holding any locks and
3161 * causing extra latency for others. This is safe as the irq
3162 * generation is designed to be run atomically and so is
3163 * lockless.
3164 */
Chris Wilsonb13c2b92010-12-13 16:54:50 +00003165 if (ring->irq_get(ring)) {
3166 ret = wait_event_interruptible(ring->irq_queue,
3167 i915_seqno_passed(ring->get_seqno(ring), seqno)
3168 || atomic_read(&dev_priv->mm.wedged));
3169 ring->irq_put(ring);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003170
Chris Wilsonb13c2b92010-12-13 16:54:50 +00003171 if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
3172 ret = -EIO;
Eric Anholte959b5d2011-12-22 14:55:01 -08003173 } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
3174 seqno) ||
Eric Anholt7ea29b12011-12-22 14:54:59 -08003175 atomic_read(&dev_priv->mm.wedged), 3000)) {
3176 ret = -EBUSY;
Chris Wilsonb13c2b92010-12-13 16:54:50 +00003177 }
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003178 }
3179
3180 if (ret == 0)
3181 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
Eric Anholtb9624422009-06-03 07:27:35 +00003182
Eric Anholt673a3942008-07-30 12:06:12 -07003183 return ret;
3184}
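/*
 * Worked example of the 20ms window above (not original text): if a client
 * emitted requests at t = 0, 5, 10, 15 and 20 ms and calls the throttle ioctl
 * at t = 25 ms, recent_enough is t = 5 ms, so the loop stops at the t = 5
 * request and we wait only for the t = 0 seqno -- the newest request emitted
 * more than 20 ms ago -- keeping roughly a frame of CPU/GPU overlap.
 */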
3185
Eric Anholt673a3942008-07-30 12:06:12 -07003186int
Chris Wilson05394f32010-11-08 19:18:58 +00003187i915_gem_object_pin(struct drm_i915_gem_object *obj,
3188 uint32_t alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01003189 bool map_and_fenceable)
Eric Anholt673a3942008-07-30 12:06:12 -07003190{
Chris Wilson05394f32010-11-08 19:18:58 +00003191 struct drm_device *dev = obj->base.dev;
Chris Wilsonf13d3f72010-09-20 17:36:15 +01003192 struct drm_i915_private *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07003193 int ret;
3194
Chris Wilson05394f32010-11-08 19:18:58 +00003195 BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
Chris Wilson23bc5982010-09-29 16:10:57 +01003196 WARN_ON(i915_verify_lists(dev));
Chris Wilsonac0c6b52010-05-27 13:18:18 +01003197
Chris Wilson05394f32010-11-08 19:18:58 +00003198 if (obj->gtt_space != NULL) {
3199 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
3200 (map_and_fenceable && !obj->map_and_fenceable)) {
3201 WARN(obj->pin_count,
Chris Wilsonae7d49d2010-08-04 12:37:41 +01003202 "bo is already pinned with incorrect alignment:"
Daniel Vetter75e9e912010-11-04 17:11:09 +01003203 " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
3204 " obj->map_and_fenceable=%d\n",
Chris Wilson05394f32010-11-08 19:18:58 +00003205 obj->gtt_offset, alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01003206 map_and_fenceable,
Chris Wilson05394f32010-11-08 19:18:58 +00003207 obj->map_and_fenceable);
Chris Wilsonac0c6b52010-05-27 13:18:18 +01003208 ret = i915_gem_object_unbind(obj);
3209 if (ret)
3210 return ret;
3211 }
3212 }
3213
Chris Wilson05394f32010-11-08 19:18:58 +00003214 if (obj->gtt_space == NULL) {
Chris Wilsona00b10c2010-09-24 21:15:47 +01003215 ret = i915_gem_object_bind_to_gtt(obj, alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01003216 map_and_fenceable);
Chris Wilson97311292009-09-21 00:22:34 +01003217 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07003218 return ret;
Chris Wilson22c344e2009-02-11 14:26:45 +00003219 }
Jesse Barnes76446ca2009-12-17 22:05:42 -05003220
Daniel Vetter74898d72012-02-15 23:50:22 +01003221 if (!obj->has_global_gtt_mapping && map_and_fenceable)
3222 i915_gem_gtt_bind_object(obj, obj->cache_level);
3223
Chris Wilson05394f32010-11-08 19:18:58 +00003224 if (obj->pin_count++ == 0) {
Chris Wilson05394f32010-11-08 19:18:58 +00003225 if (!obj->active)
3226 list_move_tail(&obj->mm_list,
Chris Wilsonf13d3f72010-09-20 17:36:15 +01003227 &dev_priv->mm.pinned_list);
Eric Anholt673a3942008-07-30 12:06:12 -07003228 }
Chris Wilson6299f992010-11-24 12:23:44 +00003229 obj->pin_mappable |= map_and_fenceable;
Eric Anholt673a3942008-07-30 12:06:12 -07003230
Chris Wilson23bc5982010-09-29 16:10:57 +01003231 WARN_ON(i915_verify_lists(dev));
Eric Anholt673a3942008-07-30 12:06:12 -07003232 return 0;
3233}
3234
3235void
Chris Wilson05394f32010-11-08 19:18:58 +00003236i915_gem_object_unpin(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07003237{
Chris Wilson05394f32010-11-08 19:18:58 +00003238 struct drm_device *dev = obj->base.dev;
Eric Anholt673a3942008-07-30 12:06:12 -07003239 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07003240
Chris Wilson23bc5982010-09-29 16:10:57 +01003241 WARN_ON(i915_verify_lists(dev));
Chris Wilson05394f32010-11-08 19:18:58 +00003242 BUG_ON(obj->pin_count == 0);
3243 BUG_ON(obj->gtt_space == NULL);
Eric Anholt673a3942008-07-30 12:06:12 -07003244
Chris Wilson05394f32010-11-08 19:18:58 +00003245 if (--obj->pin_count == 0) {
3246 if (!obj->active)
3247 list_move_tail(&obj->mm_list,
Eric Anholt673a3942008-07-30 12:06:12 -07003248 &dev_priv->mm.inactive_list);
Chris Wilson6299f992010-11-24 12:23:44 +00003249 obj->pin_mappable = false;
Eric Anholt673a3942008-07-30 12:06:12 -07003250 }
Chris Wilson23bc5982010-09-29 16:10:57 +01003251 WARN_ON(i915_verify_lists(dev));
Eric Anholt673a3942008-07-30 12:06:12 -07003252}
3253
3254int
3255i915_gem_pin_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003256 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003257{
3258 struct drm_i915_gem_pin *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003259 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07003260 int ret;
3261
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003262 ret = i915_mutex_lock_interruptible(dev);
3263 if (ret)
3264 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003265
Chris Wilson05394f32010-11-08 19:18:58 +00003266 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003267 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003268 ret = -ENOENT;
3269 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003270 }
Eric Anholt673a3942008-07-30 12:06:12 -07003271
Chris Wilson05394f32010-11-08 19:18:58 +00003272 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonbb6baf72009-09-22 14:24:13 +01003273 DRM_ERROR("Attempting to pin a purgeable buffer\n");
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003274 ret = -EINVAL;
3275 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003276 }
3277
Chris Wilson05394f32010-11-08 19:18:58 +00003278 if (obj->pin_filp != NULL && obj->pin_filp != file) {
Jesse Barnes79e53942008-11-07 14:24:08 -08003279 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3280 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003281 ret = -EINVAL;
3282 goto out;
Jesse Barnes79e53942008-11-07 14:24:08 -08003283 }
3284
Chris Wilson05394f32010-11-08 19:18:58 +00003285 obj->user_pin_count++;
3286 obj->pin_filp = file;
3287 if (obj->user_pin_count == 1) {
Daniel Vetter75e9e912010-11-04 17:11:09 +01003288 ret = i915_gem_object_pin(obj, args->alignment, true);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003289 if (ret)
3290 goto out;
Eric Anholt673a3942008-07-30 12:06:12 -07003291 }
3292
3293 /* XXX - flush the CPU caches for pinned objects
3294 * as the X server doesn't manage domains yet
3295 */
Eric Anholte47c68e2008-11-14 13:35:19 -08003296 i915_gem_object_flush_cpu_write_domain(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00003297 args->offset = obj->gtt_offset;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003298out:
Chris Wilson05394f32010-11-08 19:18:58 +00003299 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003300unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003301 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003302 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003303}
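/*
 * Illustrative userspace sketch (assumed, not part of this file): the handler
 * above is reached via the pin ioctl, roughly
 *
 *	struct drm_i915_gem_pin pin = { .handle = handle, .alignment = 0 };
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_PIN, &pin) == 0)
 *		gtt_offset = pin.offset;
 *
 * DRM_IOCTL_I915_GEM_PIN and libdrm's drmIoctl() wrapper are assumed from
 * i915_drm.h and libdrm; handle, alignment and offset are the args fields the
 * handler above actually uses.
 */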
3304
3305int
3306i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003307 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003308{
3309 struct drm_i915_gem_pin *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003310 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01003311 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003312
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003313 ret = i915_mutex_lock_interruptible(dev);
3314 if (ret)
3315 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003316
Chris Wilson05394f32010-11-08 19:18:58 +00003317 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003318 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003319 ret = -ENOENT;
3320 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003321 }
Chris Wilson76c1dec2010-09-25 11:22:51 +01003322
Chris Wilson05394f32010-11-08 19:18:58 +00003323 if (obj->pin_filp != file) {
Jesse Barnes79e53942008-11-07 14:24:08 -08003324 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
3325 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003326 ret = -EINVAL;
3327 goto out;
Jesse Barnes79e53942008-11-07 14:24:08 -08003328 }
Chris Wilson05394f32010-11-08 19:18:58 +00003329 obj->user_pin_count--;
3330 if (obj->user_pin_count == 0) {
3331 obj->pin_filp = NULL;
Jesse Barnes79e53942008-11-07 14:24:08 -08003332 i915_gem_object_unpin(obj);
3333 }
Eric Anholt673a3942008-07-30 12:06:12 -07003334
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003335out:
Chris Wilson05394f32010-11-08 19:18:58 +00003336 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003337unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003338 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003339 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003340}
3341
3342int
3343i915_gem_busy_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003344 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003345{
3346 struct drm_i915_gem_busy *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003347 struct drm_i915_gem_object *obj;
Chris Wilson30dbf0c2010-09-25 10:19:17 +01003348 int ret;
3349
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003350 ret = i915_mutex_lock_interruptible(dev);
3351 if (ret)
3352 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003353
Chris Wilson05394f32010-11-08 19:18:58 +00003354 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003355 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003356 ret = -ENOENT;
3357 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003358 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08003359
Chris Wilson0be555b2010-08-04 15:36:30 +01003360 /* Count all active objects as busy, even if they are currently not used
3361 * by the gpu. Users of this interface expect objects to eventually
3362 * become non-busy without any further actions, therefore emit any
3363 * necessary flushes here.
Eric Anholtc4de0a52008-12-14 19:05:04 -08003364 */
Chris Wilson05394f32010-11-08 19:18:58 +00003365 args->busy = obj->active;
Chris Wilson0be555b2010-08-04 15:36:30 +01003366 if (args->busy) {
3367 /* Unconditionally flush objects, even when the gpu still uses this
3368 * object. Userspace calling this function indicates that it wants to
3369 * use this buffer rather sooner than later, so issuing the required
3370 * flush earlier is beneficial.
3371 */
Chris Wilson1a1c6972010-12-07 23:00:20 +00003372 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
Chris Wilsondb53a302011-02-03 11:57:46 +00003373 ret = i915_gem_flush_ring(obj->ring,
Chris Wilson88241782011-01-07 17:09:48 +00003374 0, obj->base.write_domain);
Chris Wilson1a1c6972010-12-07 23:00:20 +00003375 } else if (obj->ring->outstanding_lazy_request ==
3376 obj->last_rendering_seqno) {
3377 struct drm_i915_gem_request *request;
3378
Chris Wilson7a194872010-12-07 10:38:40 +00003379 /* This ring is not being cleared by active usage,
3380 * so emit a request to do so.
3381 */
Chris Wilson1a1c6972010-12-07 23:00:20 +00003382 request = kzalloc(sizeof(*request), GFP_KERNEL);
Rakib Mullick457eafc2011-11-16 00:49:28 +06003383 if (request) {
Akshay Joshi0206e352011-08-16 15:34:10 -04003384 ret = i915_add_request(obj->ring, NULL, request);
Rakib Mullick457eafc2011-11-16 00:49:28 +06003385 if (ret)
3386 kfree(request);
3387 } else
Chris Wilson7a194872010-12-07 10:38:40 +00003388 ret = -ENOMEM;
3389 }
Chris Wilson0be555b2010-08-04 15:36:30 +01003390
3391 /* Update the active list for the hardware's current position.
3392 * Otherwise this only updates on a delayed timer or when irqs
3393 * are actually unmasked, and our working set ends up being
3394 * larger than required.
3395 */
Chris Wilsondb53a302011-02-03 11:57:46 +00003396 i915_gem_retire_requests_ring(obj->ring);
Chris Wilson0be555b2010-08-04 15:36:30 +01003397
Chris Wilson05394f32010-11-08 19:18:58 +00003398 args->busy = obj->active;
Chris Wilson0be555b2010-08-04 15:36:30 +01003399 }
Eric Anholt673a3942008-07-30 12:06:12 -07003400
Chris Wilson05394f32010-11-08 19:18:58 +00003401 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003402unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003403 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003404 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003405}
3406
3407int
3408i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3409 struct drm_file *file_priv)
3410{
Akshay Joshi0206e352011-08-16 15:34:10 -04003411 return i915_gem_ring_throttle(dev, file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07003412}
3413
Chris Wilson3ef94da2009-09-14 16:50:29 +01003414int
3415i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3416 struct drm_file *file_priv)
3417{
3418 struct drm_i915_gem_madvise *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003419 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01003420 int ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003421
3422 switch (args->madv) {
3423 case I915_MADV_DONTNEED:
3424 case I915_MADV_WILLNEED:
3425 break;
3426 default:
3427 return -EINVAL;
3428 }
3429
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003430 ret = i915_mutex_lock_interruptible(dev);
3431 if (ret)
3432 return ret;
3433
Chris Wilson05394f32010-11-08 19:18:58 +00003434 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003435 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003436 ret = -ENOENT;
3437 goto unlock;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003438 }
Chris Wilson3ef94da2009-09-14 16:50:29 +01003439
Chris Wilson05394f32010-11-08 19:18:58 +00003440 if (obj->pin_count) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003441 ret = -EINVAL;
3442 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003443 }
3444
Chris Wilson05394f32010-11-08 19:18:58 +00003445 if (obj->madv != __I915_MADV_PURGED)
3446 obj->madv = args->madv;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003447
Chris Wilson2d7ef392009-09-20 23:13:10 +01003448 /* if the object is no longer bound, discard its backing storage */
Chris Wilson05394f32010-11-08 19:18:58 +00003449 if (i915_gem_object_is_purgeable(obj) &&
3450 obj->gtt_space == NULL)
Chris Wilson2d7ef392009-09-20 23:13:10 +01003451 i915_gem_object_truncate(obj);
3452
Chris Wilson05394f32010-11-08 19:18:58 +00003453 args->retained = obj->madv != __I915_MADV_PURGED;
Chris Wilsonbb6baf72009-09-22 14:24:13 +01003454
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003455out:
Chris Wilson05394f32010-11-08 19:18:58 +00003456 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003457unlock:
Chris Wilson3ef94da2009-09-14 16:50:29 +01003458 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003459 return ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003460}
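/*
 * Illustrative userspace sketch (assumed, not part of this file): a buffer
 * cache marks idle buffers purgeable and, on reuse, checks whether the kernel
 * discarded the backing store:
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	...
 *	madv.madv = I915_MADV_WILLNEED;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	if (!madv.retained)
 *		reupload_contents(handle);
 *
 * DRM_IOCTL_I915_GEM_MADVISE, drmIoctl() and reupload_contents() are assumed
 * names; handle, madv and retained mirror the args fields used above.
 */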
3461
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
						  size_t size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct address_space *mapping;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
		kfree(obj);
		return NULL;
	}

	mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);

	i915_gem_info_add_obj(dev_priv, size);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(dev)) {
		/* On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached.  Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache.  This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		obj->cache_level = I915_CACHE_LLC;
	} else
		obj->cache_level = I915_CACHE_NONE;

	obj->base.driver_private = NULL;
	obj->fence_reg = I915_FENCE_REG_NONE;
	INIT_LIST_HEAD(&obj->mm_list);
	INIT_LIST_HEAD(&obj->gtt_list);
	INIT_LIST_HEAD(&obj->ring_list);
	INIT_LIST_HEAD(&obj->exec_list);
	INIT_LIST_HEAD(&obj->gpu_write_list);
	obj->madv = I915_MADV_WILLNEED;
	/* Avoid an unnecessary call to unbind on the first bind. */
	obj->map_and_fenceable = true;

	return obj;
}

int i915_gem_init_object(struct drm_gem_object *obj)
{
	BUG();

	return 0;
}

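/*
 * Final teardown of an object.  If unbinding is interrupted by a signal
 * (-ERESTARTSYS), the object is parked on the deferred_free list and
 * destroyed later instead of being freed here.
 */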
static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_object_unbind(obj);
	if (ret == -ERESTARTSYS) {
		list_move(&obj->mm_list,
			  &dev_priv->mm.deferred_free_list);
		return;
	}

	trace_i915_gem_object_destroy(obj);

	if (obj->base.map_list.map)
		drm_gem_free_mmap_offset(&obj->base);

	drm_gem_object_release(&obj->base);
	i915_gem_info_remove_obj(dev_priv, obj->base.size);

	kfree(obj->page_cpu_valid);
	kfree(obj->bit_17);
	kfree(obj);
}

void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	struct drm_device *dev = obj->base.dev;

	while (obj->pin_count > 0)
		i915_gem_object_unpin(obj);

	if (obj->phys_obj)
		i915_gem_detach_phys_object(dev, obj);

	i915_gem_free_object_tail(obj);
}

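/*
 * Quiesce the GPU for suspend or VT switch: wait for outstanding rendering,
 * evict everything under UMS, drop the fence registers and tear down the
 * rings.  mm.suspended stays set afterwards so no new work is accepted
 * until the hardware is re-initialised.
 */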
int
i915_gem_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	mutex_lock(&dev->struct_mutex);

	if (dev_priv->mm.suspended) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = i915_gpu_idle(dev, true);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* Under UMS, be paranoid and evict. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_gem_evict_inactive(dev, false);
		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	i915_gem_reset_fences(dev);

	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
	 * We need to replace this with a semaphore, or something.
	 * And not confound mm.suspended!
	 */
	dev_priv->mm.suspended = 1;
	del_timer_sync(&dev_priv->hangcheck_timer);

	i915_kernel_lost_context(dev);
	i915_gem_cleanup_ringbuffer(dev);

	mutex_unlock(&dev->struct_mutex);

	/* Cancel the retire work handler, which should be idle now. */
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);

	return 0;
}

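/*
 * Enable the memory tiling swizzle logic on gen5+.  Gen5 only needs the
 * display arbiter bit; gen6/gen7 additionally need TILECTL and the
 * generation-specific ARB_MODE swizzle enable.  Nothing to do when bit-6
 * swizzling was detected as NONE.
 */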
void i915_gem_init_swizzling(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 5 ||
	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
		return;

	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
				 DISP_TILE_SURFACE_SWIZZLING);

	if (IS_GEN5(dev))
		return;

	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
	if (IS_GEN6(dev))
		I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_SNB));
	else
		I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_IVB));
}

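/*
 * Program the aliasing per-process GTT into every ring.  The page directory
 * offset is converted to cache lines (divide by 64) and shifted into the
 * upper half of the PP_DIR_BASE value; the PPGTT enable bit is global on
 * gen6 (GFX_MODE) and per ring on gen7+ (RING_MODE_GEN7).
 */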
void i915_gem_init_ppgtt(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t pd_offset;
	struct intel_ring_buffer *ring;
	int i;

	if (!dev_priv->mm.aliasing_ppgtt)
		return;

	pd_offset = dev_priv->mm.aliasing_ppgtt->pd_offset;
	pd_offset /= 64; /* in cachelines, */
	pd_offset <<= 16;

	if (INTEL_INFO(dev)->gen == 6) {
		uint32_t ecochk = I915_READ(GAM_ECOCHK);
		I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
				       ECOCHK_PPGTT_CACHE64B);
		I915_WRITE(GFX_MODE, GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
	} else if (INTEL_INFO(dev)->gen >= 7) {
		I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
		/* GFX_MODE is per-ring on gen7+ */
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		ring = &dev_priv->ring[i];

		if (INTEL_INFO(dev)->gen >= 7)
			I915_WRITE(RING_MODE_GEN7(ring),
				   GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));

		I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
		I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
	}
}

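/*
 * Bring up the GPU: configure swizzling, initialise the render ring and,
 * where present, the BSD and BLT rings, then enable the aliasing PPGTT.
 * Any rings already set up are torn down again on failure.
 */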
int
i915_gem_init_hw(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	i915_gem_init_swizzling(dev);

	ret = intel_init_render_ring_buffer(dev);
	if (ret)
		return ret;

	if (HAS_BSD(dev)) {
		ret = intel_init_bsd_ring_buffer(dev);
		if (ret)
			goto cleanup_render_ring;
	}

	if (HAS_BLT(dev)) {
		ret = intel_init_blt_ring_buffer(dev);
		if (ret)
			goto cleanup_bsd_ring;
	}

	dev_priv->next_seqno = 1;

	i915_gem_init_ppgtt(dev);

	return 0;

cleanup_bsd_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
cleanup_render_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
	return ret;
}

void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < I915_NUM_RINGS; i++)
		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
}

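/*
 * UMS entry/exit points for VT switching.  Under KMS both ioctls are
 * no-ops.  Under UMS, entervt clears the wedged state, re-initialises the
 * hardware and installs the IRQ handler; leavevt uninstalls the IRQ
 * handler and idles the GPU.
 */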
int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	if (atomic_read(&dev_priv->mm.wedged)) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
		atomic_set(&dev_priv->mm.wedged, 0);
	}

	mutex_lock(&dev->struct_mutex);
	dev_priv->mm.suspended = 0;

	ret = i915_gem_init_hw(dev);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	BUG_ON(!list_empty(&dev_priv->mm.active_list));
	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
	for (i = 0; i < I915_NUM_RINGS; i++) {
		BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
		BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
	}
	mutex_unlock(&dev->struct_mutex);

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_ringbuffer;

	return 0;

cleanup_ringbuffer:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	dev_priv->mm.suspended = 1;
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	drm_irq_uninstall(dev);
	return i915_gem_idle(dev);
}

void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ret = i915_gem_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
}

static void
init_ring_lists(struct intel_ring_buffer *ring)
{
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);
}

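/*
 * One-time GEM setup at driver load: initialise the object and fence lists,
 * the delayed retire work and the memory shrinker, pick the number of fence
 * registers for this generation, and apply the gen3 MI_ARB_STATE C3 LP
 * write-enable workaround.
 */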
void
i915_gem_load(struct drm_device *dev)
{
	int i;
	drm_i915_private_t *dev_priv = dev->dev_private;

	INIT_LIST_HEAD(&dev_priv->mm.active_list);
	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
	INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
	INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
	for (i = 0; i < I915_NUM_RINGS; i++)
		init_ring_lists(&dev_priv->ring[i]);
	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	init_completion(&dev_priv->error_completion);

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	if (IS_GEN3(dev)) {
		u32 tmp = I915_READ(MI_ARB_STATE);
		if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
			/* arb state is a masked write, so set bit + bit in mask */
			tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
			I915_WRITE(MI_ARB_STATE, tmp);
		}
	}

	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

	/* Old X drivers will take 0-2 for front, back, depth buffers */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev_priv->fence_reg_start = 3;

	if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	/* Initialize fence registers to zero */
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		i915_gem_clear_fence_reg(dev, &dev_priv->fence_regs[i]);
	}

	i915_gem_detect_bit_6_swizzle(dev);
	init_waitqueue_head(&dev_priv->pending_flip_queue);

	dev_priv->mm.interruptible = true;

	dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
	dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&dev_priv->mm.inactive_shrinker);
}

/*
 * Create a physically contiguous memory object for this object
 * e.g. for cursor + overlay regs
 */
static int i915_gem_init_phys_object(struct drm_device *dev,
				     int id, int size, int align)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;
	int ret;

	if (dev_priv->mm.phys_objs[id - 1] || !size)
		return 0;

	phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
	if (!phys_obj)
		return -ENOMEM;

	phys_obj->id = id;

	phys_obj->handle = drm_pci_alloc(dev, size, align);
	if (!phys_obj->handle) {
		ret = -ENOMEM;
		goto kfree_obj;
	}
#ifdef CONFIG_X86
	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif

	dev_priv->mm.phys_objs[id - 1] = phys_obj;

	return 0;
kfree_obj:
	kfree(phys_obj);
	return ret;
}

static void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;

	if (!dev_priv->mm.phys_objs[id - 1])
		return;

	phys_obj = dev_priv->mm.phys_objs[id - 1];
	if (phys_obj->cur_obj) {
		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
	}

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif
	drm_pci_free(dev, phys_obj->handle);
	kfree(phys_obj);
	dev_priv->mm.phys_objs[id - 1] = NULL;
}

void i915_gem_free_all_phys_object(struct drm_device *dev)
{
	int i;

	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
		i915_gem_free_phys_object(dev, i);
}

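/*
 * Detach copies the contents of a physically contiguous object back into
 * its shmem pages and drops the phys mapping; attach (below) does the
 * reverse, so hardware that needs contiguous memory (cursors, overlay
 * registers) can use the object.
 */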
void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	char *vaddr;
	int i;
	int page_count;

	if (!obj->phys_obj)
		return;
	vaddr = obj->phys_obj->handle->vaddr;

	page_count = obj->base.size / PAGE_SIZE;
	for (i = 0; i < page_count; i++) {
		struct page *page = shmem_read_mapping_page(mapping, i);
		if (!IS_ERR(page)) {
			char *dst = kmap_atomic(page);
			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
			kunmap_atomic(dst);

			drm_clflush_pages(&page, 1);

			set_page_dirty(page);
			mark_page_accessed(page);
			page_cache_release(page);
		}
	}
	intel_gtt_chipset_flush();

	obj->phys_obj->cur_obj = NULL;
	obj->phys_obj = NULL;
}

int
i915_gem_attach_phys_object(struct drm_device *dev,
			    struct drm_i915_gem_object *obj,
			    int id,
			    int align)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = 0;
	int page_count;
	int i;

	if (id > I915_MAX_PHYS_OBJECT)
		return -EINVAL;

	if (obj->phys_obj) {
		if (obj->phys_obj->id == id)
			return 0;
		i915_gem_detach_phys_object(dev, obj);
	}

	/* create a new object */
	if (!dev_priv->mm.phys_objs[id - 1]) {
		ret = i915_gem_init_phys_object(dev, id,
						obj->base.size, align);
		if (ret) {
			DRM_ERROR("failed to init phys object %d size: %zu\n",
				  id, obj->base.size);
			return ret;
		}
	}

	/* bind to the object */
	obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
	obj->phys_obj->cur_obj = obj;

	page_count = obj->base.size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		struct page *page;
		char *dst, *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			return PTR_ERR(page);

		src = kmap_atomic(page);
		dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src);

		mark_page_accessed(page);
		page_cache_release(page);
	}

	return 0;
}

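/*
 * pwrite path for phys objects: try a non-faulting atomic copy first and,
 * if that fails, fall back to copy_from_user() with struct_mutex dropped.
 * Dropping the lock is safe because the phys backing store is fixed for
 * the lifetime of the object.
 */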
static int
i915_gem_phys_pwrite(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
	char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;

	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten)
			return -EFAULT;
	}

	intel_gtt_chipset_flush();
	return 0;
}

void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	while (!list_empty(&file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}

static int
i915_gpu_is_active(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int lists_empty;

	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
		      list_empty(&dev_priv->mm.active_list);

	return !lists_empty;
}

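/*
 * Memory shrinker callback.  With sc->nr_to_scan == 0 it only reports how
 * many inactive objects could be reclaimed; otherwise it unbinds purgeable
 * objects first, then any other inactive objects, and as a last resort
 * waits for the GPU to idle and rescans.  mutex_trylock() means a reclaim
 * that races with a holder of struct_mutex simply bails out.
 */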
static int
i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker,
			     struct drm_i915_private,
			     mm.inactive_shrinker);
	struct drm_device *dev = dev_priv->dev;
	struct drm_i915_gem_object *obj, *next;
	int nr_to_scan = sc->nr_to_scan;
	int cnt;

	if (!mutex_trylock(&dev->struct_mutex))
		return 0;

	/* "fast-path" to count number of available objects */
	if (nr_to_scan == 0) {
		cnt = 0;
		list_for_each_entry(obj,
				    &dev_priv->mm.inactive_list,
				    mm_list)
			cnt++;
		mutex_unlock(&dev->struct_mutex);
		return cnt / 100 * sysctl_vfs_cache_pressure;
	}

rescan:
	/* first scan for clean buffers */
	i915_gem_retire_requests(dev);

	list_for_each_entry_safe(obj, next,
				 &dev_priv->mm.inactive_list,
				 mm_list) {
		if (i915_gem_object_is_purgeable(obj)) {
			if (i915_gem_object_unbind(obj) == 0 &&
			    --nr_to_scan == 0)
				break;
		}
	}

	/* second pass, evict/count anything still on the inactive list */
	cnt = 0;
	list_for_each_entry_safe(obj, next,
				 &dev_priv->mm.inactive_list,
				 mm_list) {
		if (nr_to_scan &&
		    i915_gem_object_unbind(obj) == 0)
			nr_to_scan--;
		else
			cnt++;
	}

	if (nr_to_scan && i915_gpu_is_active(dev)) {
		/*
		 * We are desperate for pages, so as a last resort, wait
		 * for the GPU to finish and discard whatever we can.
		 * This dramatically reduces the number of OOM-killer
		 * events whilst running the GPU aggressively.
		 */
		if (i915_gpu_idle(dev, true) == 0)
			goto rescan;
	}
	mutex_unlock(&dev->struct_mutex);
	return cnt / 100 * sysctl_vfs_cache_pressure;
}