/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>

static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
						    unsigned alignment,
						    bool map_and_fenceable);
static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file);

static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
					 struct drm_i915_fence_reg *fence,
					 bool enable);

static int i915_gem_inactive_shrink(struct shrinker *shrinker,
				    struct shrink_control *sc);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
	if (obj->tiling_mode)
		i915_gem_release_mmap(obj);

	/* As we do not have an associated fence register, we will force
	 * a tiling change if we ever need to acquire one.
	 */
	obj->fence_dirty = false;
	obj->fence_reg = I915_FENCE_REG_NONE;
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
}

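/* Wait for any pending GPU reset to complete.  Returns immediately when the
 * GPU is not wedged; otherwise blocks on the error completion that the reset
 * handler signals.
 */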
static int
i915_gem_wait_for_error(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct completion *x = &dev_priv->error_completion;
	unsigned long flags;
	int ret;

	if (!atomic_read(&dev_priv->mm.wedged))
		return 0;

	ret = wait_for_completion_interruptible(x);
	if (ret)
		return ret;

	if (atomic_read(&dev_priv->mm.wedged)) {
		/* GPU is hung, bump the completion count to account for
		 * the token we just consumed so that we never hit zero and
		 * end up waiting upon a subsequent completion event that
		 * will never happen.
		 */
		spin_lock_irqsave(&x->wait.lock, flags);
		x->done++;
		spin_unlock_irqrestore(&x->wait.lock, flags);
	}
	return 0;
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	int ret;

	ret = i915_gem_wait_for_error(dev);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
	return !obj->active;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_init *args = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
		return -EINVAL;

	/* GEM with user mode setting was never supported on ilk and later. */
	if (INTEL_INFO(dev)->gen >= 5)
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	i915_gem_init_global_gtt(dev, args->gtt_start,
				 args->gtt_end, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
		if (obj->pin_count)
			pinned += obj->gtt_space->size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->mm.gtt_total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base);
		i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
		kfree(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference(&obj->base);
	trace_i915_gem_object_create(obj);

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

int i915_gem_dumb_destroy(struct drm_file *file,
			  struct drm_device *dev,
			  uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj->tiling_mode != I915_TILING_NONE;
}

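/* Copy out of a bit-17-swizzled object: each 64-byte cacheline is read from
 * its swizzled location (bit 6 of the offset flipped) so that the user sees
 * the data unswizzled.
 */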
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

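/* Counterpart of __copy_to_user_swizzled: copy user data into a
 * bit-17-swizzled object, writing each cacheline to its swizzled
 * (bit-6-flipped) location.
 */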
static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}

}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret;
}

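/* General pread path for shmem-backed objects.  Tries the atomic kmap
 * fastpath per page and falls back to the sleeping slowpath (dropping
 * struct_mutex and prefaulting the user buffer) on the first failure.
 */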
static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int prefaulted = 0;
	int needs_clflush = 0;
	int release_page;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourself into the gtt
		 * read domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		if (obj->cache_level == I915_CACHE_NONE)
			needs_clflush = 1;
		ret = i915_gem_object_set_to_gtt_domain(obj, false);
		if (ret)
			return ret;
	}

	offset = args->offset;

	while (remain > 0) {
		struct page *page;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		if (obj->pages) {
			page = obj->pages[offset >> PAGE_SHIFT];
			release_page = 0;
		} else {
			page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
				goto out;
			}
			release_page = 1;
		}

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		page_cache_get(page);
		mutex_unlock(&dev->struct_mutex);

		if (!prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);
		page_cache_release(page);
next_page:
		mark_page_accessed(page);
		if (release_page)
			page_cache_release(page);

		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	if (hit_slowpath) {
		/* Fixup: Kill any reinstated backing storage pages */
		if (obj->madv == __I915_MADV_PURGED)
			i915_gem_object_truncate(obj);
	}

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size))
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force*)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length, ret;

	ret = i915_gem_object_pin(obj, 0, true);
	if (ret)
		goto out;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	offset = obj->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
				    page_offset, user_data, page_length)) {
			ret = -EFAULT;
			goto out_unpin;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_unpin:
	i915_gem_object_unpin(obj);
out:
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
						user_data,
						page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret;
}

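/* General pwrite path for shmem-backed objects.  Mirrors the pread path:
 * per-page atomic fastpath first, then the sleeping slowpath with
 * struct_mutex dropped, flushing cachelines as required.
 */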
static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int needs_clflush_after = 0;
	int needs_clflush_before = 0;
	int release_page;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		/* If we're not in the cpu write domain, set ourself into the gtt
		 * write domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		if (obj->cache_level == I915_CACHE_NONE)
			needs_clflush_after = 1;
		ret = i915_gem_object_set_to_gtt_domain(obj, true);
		if (ret)
			return ret;
	}
	/* Same trick applies to invalidate partially written cachelines before
	 * writing. */
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
	    && obj->cache_level == I915_CACHE_NONE)
		needs_clflush_before = 1;

	offset = args->offset;
	obj->dirty = 1;

	while (remain > 0) {
		struct page *page;
		int partial_cacheline_write;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire patch. */
		partial_cacheline_write = needs_clflush_before &&
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));

		if (obj->pages) {
			page = obj->pages[offset >> PAGE_SHIFT];
			release_page = 0;
		} else {
			page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
				goto out;
			}
			release_page = 1;
		}

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		page_cache_get(page);
		mutex_unlock(&dev->struct_mutex);

		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);

		mutex_lock(&dev->struct_mutex);
		page_cache_release(page);
next_page:
		set_page_dirty(page);
		mark_page_accessed(page);
		if (release_page)
			page_cache_release(page);

		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	if (hit_slowpath) {
		/* Fixup: Kill any reinstated backing storage pages */
		if (obj->madv == __I915_MADV_PURGED)
			i915_gem_object_truncate(obj);
		/* and flush dirty cachelines in case the object isn't in the cpu write
		 * domain anymore. */
		if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			i915_gem_clflush_object(obj);
			intel_gtt_chipset_flush();
		}
	}

	if (needs_clflush_after)
		intel_gtt_chipset_flush();

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size))
		return -EFAULT;

	ret = fault_in_multipages_readable((char __user *)(uintptr_t)args->data_ptr,
					   args->size);
	if (ret)
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->phys_obj) {
		ret = i915_gem_phys_pwrite(dev, obj, args, file);
		goto out;
	}

	if (obj->gtt_space &&
	    obj->cache_level == I915_CACHE_NONE &&
	    obj->tiling_mode == I915_TILING_NONE &&
	    obj->map_and_fenceable &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case. */
	}

	if (ret == -EFAULT)
		ret = i915_gem_shmem_pwrite(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain. Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	up_write(&current->mm->mmap_sem);
	drm_gem_object_unreference_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

/**
 * i915_gem_fault - fault a page into the GTT
 * vma: VMA in question
 * vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace. The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room. So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto out;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	/* Now bind it into the GTT if needed */
	if (!obj->map_and_fenceable) {
		ret = i915_gem_object_unbind(obj);
		if (ret)
			goto unlock;
	}
	if (!obj->gtt_space) {
		ret = i915_gem_object_bind_to_gtt(obj, 0, true);
		if (ret)
			goto unlock;

		ret = i915_gem_object_set_to_gtt_domain(obj, write);
		if (ret)
			goto unlock;
	}

	if (!obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(obj, obj->cache_level);

	ret = i915_gem_object_get_fence(obj);
	if (ret)
		goto unlock;

	if (i915_gem_object_is_inactive(obj))
		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

	obj->fault_mappable = true;

	pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
		page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EIO:
	case -EAGAIN:
		/* Give the error handler a chance to run and move the
		 * objects off the GPU active list. Next time we service the
		 * fault, we should be able to transition the page into the
		 * GTT without touching the GPU (and so avoid further
		 * EIO/EAGAIN). If the GPU is wedged, then there is no issue
		 * with coherency, just lost writes.
		 */
		set_need_resched();
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
	if (!obj->fault_mappable)
		return;

	if (obj->base.dev->dev_mapping)
		unmap_mapping_range(obj->base.dev->dev_mapping,
				    (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
				    obj->base.size, 1);

	obj->fault_mappable = false;
}

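/* Size of the fence region needed to cover a tiled object: the object size
 * on gen4+, otherwise the next power of two (minimum 1MB on gen3, 512KB on
 * gen2).
 */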
Chris Wilson92b88ae2010-11-09 11:47:32 +00001160static uint32_t
Chris Wilsone28f8712011-07-18 13:11:49 -07001161i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
Chris Wilson92b88ae2010-11-09 11:47:32 +00001162{
Chris Wilsone28f8712011-07-18 13:11:49 -07001163 uint32_t gtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001164
1165 if (INTEL_INFO(dev)->gen >= 4 ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001166 tiling_mode == I915_TILING_NONE)
1167 return size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001168
1169 /* Previous chips need a power-of-two fence region when tiling */
1170 if (INTEL_INFO(dev)->gen == 3)
Chris Wilsone28f8712011-07-18 13:11:49 -07001171 gtt_size = 1024*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001172 else
Chris Wilsone28f8712011-07-18 13:11:49 -07001173 gtt_size = 512*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001174
Chris Wilsone28f8712011-07-18 13:11:49 -07001175 while (gtt_size < size)
1176 gtt_size <<= 1;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001177
Chris Wilsone28f8712011-07-18 13:11:49 -07001178 return gtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001179}
1180
Jesse Barnesde151cf2008-11-12 10:03:55 -08001181/**
1182 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1183 * @obj: object to check
1184 *
1185 * Return the required GTT alignment for an object, taking into account
Daniel Vetter5e783302010-11-14 22:32:36 +01001186 * potential fence register mapping.
Jesse Barnesde151cf2008-11-12 10:03:55 -08001187 */
1188static uint32_t
Chris Wilsone28f8712011-07-18 13:11:49 -07001189i915_gem_get_gtt_alignment(struct drm_device *dev,
1190 uint32_t size,
1191 int tiling_mode)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001192{
Jesse Barnesde151cf2008-11-12 10:03:55 -08001193 /*
1194 * Minimum alignment is 4k (GTT page size), but might be greater
1195 * if a fence register is needed for the object.
1196 */
Chris Wilsona00b10c2010-09-24 21:15:47 +01001197 if (INTEL_INFO(dev)->gen >= 4 ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001198 tiling_mode == I915_TILING_NONE)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001199 return 4096;
1200
1201 /*
1202 * Previous chips need to be aligned to the size of the smallest
1203 * fence register that can contain the object.
1204 */
Chris Wilsone28f8712011-07-18 13:11:49 -07001205 return i915_gem_get_gtt_size(dev, size, tiling_mode);
Chris Wilsona00b10c2010-09-24 21:15:47 +01001206}
1207
Daniel Vetter5e783302010-11-14 22:32:36 +01001208/**
1209 * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
1210 * unfenced object
Chris Wilsone28f8712011-07-18 13:11:49 -07001211 * @dev: the device
1212 * @size: size of the object
1213 * @tiling_mode: tiling mode of the object
Daniel Vetter5e783302010-11-14 22:32:36 +01001214 *
1215 * Return the required GTT alignment for an object, only taking into account
1216 * unfenced tiled surface requirements.
1217 */
Chris Wilson467cffb2011-03-07 10:42:03 +00001218uint32_t
Chris Wilsone28f8712011-07-18 13:11:49 -07001219i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
1220 uint32_t size,
1221 int tiling_mode)
Daniel Vetter5e783302010-11-14 22:32:36 +01001222{
Daniel Vetter5e783302010-11-14 22:32:36 +01001223 /*
1224 * Minimum alignment is 4k (GTT page size) for sane hw.
1225 */
1226 if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001227 tiling_mode == I915_TILING_NONE)
Daniel Vetter5e783302010-11-14 22:32:36 +01001228 return 4096;
1229
Chris Wilsone28f8712011-07-18 13:11:49 -07001230 /* Previous hardware, however, needs to be aligned to a power-of-two
1231 * tile height. The simplest way to determine this is to reuse
1232 * the power-of-two fence region size computed by i915_gem_get_gtt_size().
Daniel Vetter5e783302010-11-14 22:32:36 +01001233 */
Chris Wilsone28f8712011-07-18 13:11:49 -07001234 return i915_gem_get_gtt_size(dev, size, tiling_mode);
Daniel Vetter5e783302010-11-14 22:32:36 +01001235}
1236
Jesse Barnesde151cf2008-11-12 10:03:55 -08001237int
Dave Airlieff72145b2011-02-07 12:16:14 +10001238i915_gem_mmap_gtt(struct drm_file *file,
1239 struct drm_device *dev,
1240 uint32_t handle,
1241 uint64_t *offset)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001242{
Chris Wilsonda761a62010-10-27 17:37:08 +01001243 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00001244 struct drm_i915_gem_object *obj;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001245 int ret;
1246
Chris Wilson76c1dec2010-09-25 11:22:51 +01001247 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001248 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001249 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001250
Dave Airlieff72145b2011-02-07 12:16:14 +10001251 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00001252 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001253 ret = -ENOENT;
1254 goto unlock;
1255 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08001256
Chris Wilson05394f32010-11-08 19:18:58 +00001257 if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
Chris Wilsonda761a62010-10-27 17:37:08 +01001258 ret = -E2BIG;
Eric Anholtff56b0b2011-10-31 23:16:21 -07001259 goto out;
Chris Wilsonda761a62010-10-27 17:37:08 +01001260 }
1261
Chris Wilson05394f32010-11-08 19:18:58 +00001262 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonab182822009-09-22 18:46:17 +01001263 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001264 ret = -EINVAL;
1265 goto out;
Chris Wilsonab182822009-09-22 18:46:17 +01001266 }
1267
Chris Wilson05394f32010-11-08 19:18:58 +00001268 if (!obj->base.map_list.map) {
Rob Clarkb464e9a2011-08-10 08:09:08 -05001269 ret = drm_gem_create_mmap_offset(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001270 if (ret)
1271 goto out;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001272 }
1273
Dave Airlieff72145b2011-02-07 12:16:14 +10001274 *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001275
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001276out:
Chris Wilson05394f32010-11-08 19:18:58 +00001277 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001278unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001279 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001280 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001281}
1282
Dave Airlieff72145b2011-02-07 12:16:14 +10001283/**
1284 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1285 * @dev: DRM device
1286 * @data: GTT mapping ioctl data
1287 * @file: DRM file private of the calling process
1288 *
1289 * Simply returns the fake offset to userspace so it can mmap it.
1290 * The mmap call will end up in drm_gem_mmap(), which will set things
1291 * up so we can get faults in the handler above.
1292 *
1293 * The fault handler will take care of binding the object into the GTT
1294 * (since it may have been evicted to make room for something), allocating
1295 * a fence register, and mapping the appropriate aperture address into
1296 * userspace.
1297 */
1298int
1299i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1300 struct drm_file *file)
1301{
1302 struct drm_i915_gem_mmap_gtt *args = data;
1303
Dave Airlieff72145b2011-02-07 12:16:14 +10001304 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1305}
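/*
 * For reference, a userspace consumer (e.g. libdrm) typically feeds the
 * returned fake offset straight into mmap() on the DRM fd. Illustrative
 * sketch only -- not part of this file, error handling omitted:
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.offset);
 */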
1306
1307
Chris Wilsone5281cc2010-10-28 13:45:36 +01001308static int
Chris Wilson05394f32010-11-08 19:18:58 +00001309i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
Chris Wilsone5281cc2010-10-28 13:45:36 +01001310 gfp_t gfpmask)
1311{
Chris Wilsone5281cc2010-10-28 13:45:36 +01001312 int page_count, i;
1313 struct address_space *mapping;
1314 struct inode *inode;
1315 struct page *page;
1316
1317 /* Get the list of pages out of our struct file. They'll be pinned
1318 * at this point until we release them.
1319 */
Chris Wilson05394f32010-11-08 19:18:58 +00001320 page_count = obj->base.size / PAGE_SIZE;
1321 BUG_ON(obj->pages != NULL);
1322 obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
1323 if (obj->pages == NULL)
Chris Wilsone5281cc2010-10-28 13:45:36 +01001324 return -ENOMEM;
1325
Chris Wilson05394f32010-11-08 19:18:58 +00001326 inode = obj->base.filp->f_path.dentry->d_inode;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001327 mapping = inode->i_mapping;
Hugh Dickins5949eac2011-06-27 16:18:18 -07001328 gfpmask |= mapping_gfp_mask(mapping);
1329
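	/*
	 * Each iteration faults in (or allocates) one backing page from the
	 * shmemfs inode and returns it with an elevated reference count, so
	 * the page stays resident until put_pages_gtt drops it again.
	 */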
Chris Wilsone5281cc2010-10-28 13:45:36 +01001330 for (i = 0; i < page_count; i++) {
Hugh Dickins5949eac2011-06-27 16:18:18 -07001331 page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
Chris Wilsone5281cc2010-10-28 13:45:36 +01001332 if (IS_ERR(page))
1333 goto err_pages;
1334
Chris Wilson05394f32010-11-08 19:18:58 +00001335 obj->pages[i] = page;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001336 }
1337
Daniel Vetter6dacfd22011-09-12 21:30:02 +02001338 if (i915_gem_object_needs_bit17_swizzle(obj))
Chris Wilsone5281cc2010-10-28 13:45:36 +01001339 i915_gem_object_do_bit_17_swizzle(obj);
1340
1341 return 0;
1342
1343err_pages:
1344 while (i--)
Chris Wilson05394f32010-11-08 19:18:58 +00001345 page_cache_release(obj->pages[i]);
Chris Wilsone5281cc2010-10-28 13:45:36 +01001346
Chris Wilson05394f32010-11-08 19:18:58 +00001347 drm_free_large(obj->pages);
1348 obj->pages = NULL;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001349 return PTR_ERR(page);
1350}
1351
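/*
 * Release the shmemfs backing pages acquired by get_pages_gtt: mark them
 * dirty if the GPU wrote to the object, keep them on the page LRU only if
 * the object is still WILLNEED, and drop our references.
 */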
Chris Wilson5cdf5882010-09-27 15:51:07 +01001352static void
Chris Wilson05394f32010-11-08 19:18:58 +00001353i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001354{
Chris Wilson05394f32010-11-08 19:18:58 +00001355 int page_count = obj->base.size / PAGE_SIZE;
Eric Anholt673a3942008-07-30 12:06:12 -07001356 int i;
1357
Chris Wilson05394f32010-11-08 19:18:58 +00001358 BUG_ON(obj->madv == __I915_MADV_PURGED);
Eric Anholt856fa192009-03-19 14:10:50 -07001359
Daniel Vetter6dacfd22011-09-12 21:30:02 +02001360 if (i915_gem_object_needs_bit17_swizzle(obj))
Eric Anholt280b7132009-03-12 16:56:27 -07001361 i915_gem_object_save_bit_17_swizzle(obj);
1362
Chris Wilson05394f32010-11-08 19:18:58 +00001363 if (obj->madv == I915_MADV_DONTNEED)
1364 obj->dirty = 0;
Chris Wilson3ef94da2009-09-14 16:50:29 +01001365
1366 for (i = 0; i < page_count; i++) {
Chris Wilson05394f32010-11-08 19:18:58 +00001367 if (obj->dirty)
1368 set_page_dirty(obj->pages[i]);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001369
Chris Wilson05394f32010-11-08 19:18:58 +00001370 if (obj->madv == I915_MADV_WILLNEED)
1371 mark_page_accessed(obj->pages[i]);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001372
Chris Wilson05394f32010-11-08 19:18:58 +00001373 page_cache_release(obj->pages[i]);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001374 }
Chris Wilson05394f32010-11-08 19:18:58 +00001375 obj->dirty = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001376
Chris Wilson05394f32010-11-08 19:18:58 +00001377 drm_free_large(obj->pages);
1378 obj->pages = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07001379}
1380
Chris Wilson54cf91d2010-11-25 18:00:26 +00001381void
Chris Wilson05394f32010-11-08 19:18:58 +00001382i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001383 struct intel_ring_buffer *ring,
1384 u32 seqno)
Eric Anholt673a3942008-07-30 12:06:12 -07001385{
Chris Wilson05394f32010-11-08 19:18:58 +00001386 struct drm_device *dev = obj->base.dev;
Chris Wilson69dc4982010-10-19 10:36:51 +01001387 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter617dbe22010-02-11 22:16:02 +01001388
Zou Nan hai852835f2010-05-21 09:08:56 +08001389 BUG_ON(ring == NULL);
Chris Wilson05394f32010-11-08 19:18:58 +00001390 obj->ring = ring;
Eric Anholt673a3942008-07-30 12:06:12 -07001391
1392 /* Add a reference if we're newly entering the active list. */
Chris Wilson05394f32010-11-08 19:18:58 +00001393 if (!obj->active) {
1394 drm_gem_object_reference(&obj->base);
1395 obj->active = 1;
Eric Anholt673a3942008-07-30 12:06:12 -07001396 }
Daniel Vettere35a41d2010-02-11 22:13:59 +01001397
Eric Anholt673a3942008-07-30 12:06:12 -07001398 /* Move from whatever list we were on to the tail of execution. */
Chris Wilson05394f32010-11-08 19:18:58 +00001399 list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1400 list_move_tail(&obj->ring_list, &ring->active_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00001401
Chris Wilson05394f32010-11-08 19:18:58 +00001402 obj->last_rendering_seqno = seqno;
Chris Wilson7dd49062012-03-21 10:48:18 +00001403
Chris Wilsoncaea7472010-11-12 13:53:37 +00001404 if (obj->fenced_gpu_access) {
Chris Wilsoncaea7472010-11-12 13:53:37 +00001405 obj->last_fenced_seqno = seqno;
Chris Wilsoncaea7472010-11-12 13:53:37 +00001406
Chris Wilson7dd49062012-03-21 10:48:18 +00001407 /* Bump MRU to take account of the delayed flush */
1408 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1409 struct drm_i915_fence_reg *reg;
1410
1411 reg = &dev_priv->fence_regs[obj->fence_reg];
1412 list_move_tail(&reg->lru_list,
1413 &dev_priv->mm.fence_list);
1414 }
Chris Wilsoncaea7472010-11-12 13:53:37 +00001415 }
1416}
1417
1418static void
1419i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
1420{
1421 list_del_init(&obj->ring_list);
1422 obj->last_rendering_seqno = 0;
Daniel Vetter15a13bb2012-04-12 01:27:57 +02001423 obj->last_fenced_seqno = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001424}
1425
Eric Anholtce44b0e2008-11-06 16:00:31 -08001426static void
Chris Wilson05394f32010-11-08 19:18:58 +00001427i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
Eric Anholtce44b0e2008-11-06 16:00:31 -08001428{
Chris Wilson05394f32010-11-08 19:18:58 +00001429 struct drm_device *dev = obj->base.dev;
Eric Anholtce44b0e2008-11-06 16:00:31 -08001430 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholtce44b0e2008-11-06 16:00:31 -08001431
Chris Wilson05394f32010-11-08 19:18:58 +00001432 BUG_ON(!obj->active);
1433 list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00001434
1435 i915_gem_object_move_off_active(obj);
1436}
1437
1438static void
1439i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1440{
1441 struct drm_device *dev = obj->base.dev;
1442 struct drm_i915_private *dev_priv = dev->dev_private;
1443
Chris Wilson1b502472012-04-24 15:47:30 +01001444 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00001445
1446 BUG_ON(!list_empty(&obj->gpu_write_list));
1447 BUG_ON(!obj->active);
1448 obj->ring = NULL;
1449
1450 i915_gem_object_move_off_active(obj);
1451 obj->fenced_gpu_access = false;
Chris Wilsoncaea7472010-11-12 13:53:37 +00001452
1453 obj->active = 0;
Chris Wilson87ca9c82010-12-02 09:42:56 +00001454 obj->pending_gpu_write = false;
Chris Wilsoncaea7472010-11-12 13:53:37 +00001455 drm_gem_object_unreference(&obj->base);
1456
1457 WARN_ON(i915_verify_lists(dev));
Eric Anholtce44b0e2008-11-06 16:00:31 -08001458}
Eric Anholt673a3942008-07-30 12:06:12 -07001459
Chris Wilson963b4832009-09-20 23:03:54 +01001460/* Immediately discard the backing storage */
1461static void
Chris Wilson05394f32010-11-08 19:18:58 +00001462i915_gem_object_truncate(struct drm_i915_gem_object *obj)
Chris Wilson963b4832009-09-20 23:03:54 +01001463{
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001464 struct inode *inode;
Chris Wilson963b4832009-09-20 23:03:54 +01001465
Chris Wilsonae9fed62010-08-07 11:01:30 +01001466 /* Our goal here is to return as much of the memory as
1467 * possible back to the system, since we may be called from the OOM path.
1468 * To do this we must instruct the shmfs to drop all of its
Hugh Dickinse2377fe2011-06-27 16:18:19 -07001469 * backing pages, *now*.
Chris Wilsonae9fed62010-08-07 11:01:30 +01001470 */
Chris Wilson05394f32010-11-08 19:18:58 +00001471 inode = obj->base.filp->f_path.dentry->d_inode;
Hugh Dickinse2377fe2011-06-27 16:18:19 -07001472 shmem_truncate_range(inode, 0, (loff_t)-1);
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001473
Chris Wilsona14917e2012-02-24 21:13:38 +00001474 if (obj->base.map_list.map)
1475 drm_gem_free_mmap_offset(&obj->base);
1476
Chris Wilson05394f32010-11-08 19:18:58 +00001477 obj->madv = __I915_MADV_PURGED;
Chris Wilson963b4832009-09-20 23:03:54 +01001478}
1479
1480static inline int
Chris Wilson05394f32010-11-08 19:18:58 +00001481i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
Chris Wilson963b4832009-09-20 23:03:54 +01001482{
Chris Wilson05394f32010-11-08 19:18:58 +00001483 return obj->madv == I915_MADV_DONTNEED;
Chris Wilson963b4832009-09-20 23:03:54 +01001484}
1485
Eric Anholt673a3942008-07-30 12:06:12 -07001486static void
Chris Wilsondb53a302011-02-03 11:57:46 +00001487i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
1488 uint32_t flush_domains)
Daniel Vetter63560392010-02-19 11:51:59 +01001489{
Chris Wilson05394f32010-11-08 19:18:58 +00001490 struct drm_i915_gem_object *obj, *next;
Daniel Vetter63560392010-02-19 11:51:59 +01001491
Chris Wilson05394f32010-11-08 19:18:58 +00001492 list_for_each_entry_safe(obj, next,
Chris Wilson64193402010-10-24 12:38:05 +01001493 &ring->gpu_write_list,
Daniel Vetter63560392010-02-19 11:51:59 +01001494 gpu_write_list) {
Chris Wilson05394f32010-11-08 19:18:58 +00001495 if (obj->base.write_domain & flush_domains) {
1496 uint32_t old_write_domain = obj->base.write_domain;
Daniel Vetter63560392010-02-19 11:51:59 +01001497
Chris Wilson05394f32010-11-08 19:18:58 +00001498 obj->base.write_domain = 0;
1499 list_del_init(&obj->gpu_write_list);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001500 i915_gem_object_move_to_active(obj, ring,
Chris Wilsondb53a302011-02-03 11:57:46 +00001501 i915_gem_next_request_seqno(ring));
Daniel Vetter63560392010-02-19 11:51:59 +01001502
Daniel Vetter63560392010-02-19 11:51:59 +01001503 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00001504 obj->base.read_domains,
Daniel Vetter63560392010-02-19 11:51:59 +01001505 old_write_domain);
1506 }
1507 }
1508}
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001509
Daniel Vetter53d227f2012-01-25 16:32:49 +01001510static u32
1511i915_gem_get_seqno(struct drm_device *dev)
1512{
1513 drm_i915_private_t *dev_priv = dev->dev_private;
1514 u32 seqno = dev_priv->next_seqno;
1515
1516 /* reserve 0 for non-seqno */
1517 if (++dev_priv->next_seqno == 0)
1518 dev_priv->next_seqno = 1;
1519
1520 return seqno;
1521}
1522
1523u32
1524i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
1525{
1526 if (ring->outstanding_lazy_request == 0)
1527 ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
1528
1529 return ring->outstanding_lazy_request;
1530}
1531
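/*
 * Emit a request onto the ring and queue it for retirement: record the
 * seqno and ring tail position, associate the request with the submitting
 * file (if any), and arm the hangcheck timer and the periodic retire work.
 */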
Chris Wilson3cce4692010-10-27 16:11:02 +01001532int
Chris Wilsondb53a302011-02-03 11:57:46 +00001533i915_add_request(struct intel_ring_buffer *ring,
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001534 struct drm_file *file,
Chris Wilsondb53a302011-02-03 11:57:46 +00001535 struct drm_i915_gem_request *request)
Eric Anholt673a3942008-07-30 12:06:12 -07001536{
Chris Wilsondb53a302011-02-03 11:57:46 +00001537 drm_i915_private_t *dev_priv = ring->dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07001538 uint32_t seqno;
Chris Wilsona71d8d92012-02-15 11:25:36 +00001539 u32 request_ring_position;
Eric Anholt673a3942008-07-30 12:06:12 -07001540 int was_empty;
Chris Wilson3cce4692010-10-27 16:11:02 +01001541 int ret;
1542
1543 BUG_ON(request == NULL);
Daniel Vetter53d227f2012-01-25 16:32:49 +01001544 seqno = i915_gem_next_request_seqno(ring);
Eric Anholt673a3942008-07-30 12:06:12 -07001545
Chris Wilsona71d8d92012-02-15 11:25:36 +00001546 /* Record the position of the start of the request so that,
1547 * should we detect the updated seqno part-way through the
1548 * GPU's processing of the request, we never over-estimate the
1549 * position of the head.
1550 */
1551 request_ring_position = intel_ring_get_tail(ring);
1552
Chris Wilson3cce4692010-10-27 16:11:02 +01001553 ret = ring->add_request(ring, &seqno);
1554 if (ret)
1555 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001556
Chris Wilsondb53a302011-02-03 11:57:46 +00001557 trace_i915_gem_request_add(ring, seqno);
Eric Anholt673a3942008-07-30 12:06:12 -07001558
1559 request->seqno = seqno;
Zou Nan hai852835f2010-05-21 09:08:56 +08001560 request->ring = ring;
Chris Wilsona71d8d92012-02-15 11:25:36 +00001561 request->tail = request_ring_position;
Eric Anholt673a3942008-07-30 12:06:12 -07001562 request->emitted_jiffies = jiffies;
Zou Nan hai852835f2010-05-21 09:08:56 +08001563 was_empty = list_empty(&ring->request_list);
1564 list_add_tail(&request->list, &ring->request_list);
1565
Chris Wilsondb53a302011-02-03 11:57:46 +00001566 if (file) {
1567 struct drm_i915_file_private *file_priv = file->driver_priv;
1568
Chris Wilson1c255952010-09-26 11:03:27 +01001569 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001570 request->file_priv = file_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00001571 list_add_tail(&request->client_list,
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001572 &file_priv->mm.request_list);
Chris Wilson1c255952010-09-26 11:03:27 +01001573 spin_unlock(&file_priv->mm.lock);
Eric Anholtb9624422009-06-03 07:27:35 +00001574 }
Eric Anholt673a3942008-07-30 12:06:12 -07001575
Daniel Vetter5391d0c2012-01-25 14:03:57 +01001576 ring->outstanding_lazy_request = 0;
Chris Wilsondb53a302011-02-03 11:57:46 +00001577
Ben Gamarif65d9422009-09-14 17:48:44 -04001578 if (!dev_priv->mm.suspended) {
Ben Widawsky3e0dc6b2011-06-29 10:26:42 -07001579 if (i915_enable_hangcheck) {
1580 mod_timer(&dev_priv->hangcheck_timer,
1581 jiffies +
1582 msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
1583 }
Ben Gamarif65d9422009-09-14 17:48:44 -04001584 if (was_empty)
Chris Wilsonb3b079d2010-09-13 23:44:34 +01001585 queue_delayed_work(dev_priv->wq,
1586 &dev_priv->mm.retire_work, HZ);
Ben Gamarif65d9422009-09-14 17:48:44 -04001587 }
Chris Wilson3cce4692010-10-27 16:11:02 +01001588 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001589}
1590
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001591static inline void
1592i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
Eric Anholt673a3942008-07-30 12:06:12 -07001593{
Chris Wilson1c255952010-09-26 11:03:27 +01001594 struct drm_i915_file_private *file_priv = request->file_priv;
Eric Anholt673a3942008-07-30 12:06:12 -07001595
Chris Wilson1c255952010-09-26 11:03:27 +01001596 if (!file_priv)
1597 return;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001598
Chris Wilson1c255952010-09-26 11:03:27 +01001599 spin_lock(&file_priv->mm.lock);
Herton Ronaldo Krzesinski09bfa512011-03-17 13:45:12 +00001600 if (request->file_priv) {
1601 list_del(&request->client_list);
1602 request->file_priv = NULL;
1603 }
Chris Wilson1c255952010-09-26 11:03:27 +01001604 spin_unlock(&file_priv->mm.lock);
Eric Anholt673a3942008-07-30 12:06:12 -07001605}
1606
Chris Wilsondfaae392010-09-22 10:31:52 +01001607static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
1608 struct intel_ring_buffer *ring)
Chris Wilson9375e442010-09-19 12:21:28 +01001609{
Chris Wilsondfaae392010-09-22 10:31:52 +01001610 while (!list_empty(&ring->request_list)) {
1611 struct drm_i915_gem_request *request;
Chris Wilson9375e442010-09-19 12:21:28 +01001612
Chris Wilsondfaae392010-09-22 10:31:52 +01001613 request = list_first_entry(&ring->request_list,
1614 struct drm_i915_gem_request,
1615 list);
1616
1617 list_del(&request->list);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001618 i915_gem_request_remove_from_client(request);
Chris Wilsondfaae392010-09-22 10:31:52 +01001619 kfree(request);
1620 }
1621
1622 while (!list_empty(&ring->active_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00001623 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001624
Chris Wilson05394f32010-11-08 19:18:58 +00001625 obj = list_first_entry(&ring->active_list,
1626 struct drm_i915_gem_object,
1627 ring_list);
Eric Anholt673a3942008-07-30 12:06:12 -07001628
Chris Wilson05394f32010-11-08 19:18:58 +00001629 obj->base.write_domain = 0;
1630 list_del_init(&obj->gpu_write_list);
1631 i915_gem_object_move_to_inactive(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001632 }
Eric Anholt673a3942008-07-30 12:06:12 -07001633}
1634
Chris Wilson312817a2010-11-22 11:50:11 +00001635static void i915_gem_reset_fences(struct drm_device *dev)
1636{
1637 struct drm_i915_private *dev_priv = dev->dev_private;
1638 int i;
1639
Daniel Vetter4b9de732011-10-09 21:52:02 +02001640 for (i = 0; i < dev_priv->num_fence_regs; i++) {
Chris Wilson312817a2010-11-22 11:50:11 +00001641 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
Chris Wilson7d2cb392010-11-27 17:38:29 +00001642
Chris Wilsonada726c2012-04-17 15:31:32 +01001643 i915_gem_write_fence(dev, i, NULL);
Chris Wilson7d2cb392010-11-27 17:38:29 +00001644
Chris Wilsonada726c2012-04-17 15:31:32 +01001645 if (reg->obj)
1646 i915_gem_object_fence_lost(reg->obj);
Chris Wilson7d2cb392010-11-27 17:38:29 +00001647
Chris Wilsonada726c2012-04-17 15:31:32 +01001648 reg->pin_count = 0;
1649 reg->obj = NULL;
1650 INIT_LIST_HEAD(&reg->lru_list);
Chris Wilson312817a2010-11-22 11:50:11 +00001651 }
Chris Wilsonada726c2012-04-17 15:31:32 +01001652
1653 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
Chris Wilson312817a2010-11-22 11:50:11 +00001654}
1655
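/*
 * Called on GPU reset: discard every outstanding request, move buffers
 * that were active or flushing back to the inactive list, strip GPU read
 * domains from inactive buffers, and invalidate all fence registers.
 */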
Chris Wilson069efc12010-09-30 16:53:18 +01001656void i915_gem_reset(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07001657{
Chris Wilsondfaae392010-09-22 10:31:52 +01001658 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00001659 struct drm_i915_gem_object *obj;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001660 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07001661
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001662 for (i = 0; i < I915_NUM_RINGS; i++)
1663 i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);
Chris Wilsondfaae392010-09-22 10:31:52 +01001664
1665 /* Remove anything from the flushing lists. The GPU cache is likely
1666 * to be lost on reset along with the data, so simply move the
1667 * lost bo to the inactive list.
1668 */
1669 while (!list_empty(&dev_priv->mm.flushing_list)) {
Akshay Joshi0206e352011-08-16 15:34:10 -04001670 obj = list_first_entry(&dev_priv->mm.flushing_list,
Chris Wilson05394f32010-11-08 19:18:58 +00001671 struct drm_i915_gem_object,
1672 mm_list);
Chris Wilson9375e442010-09-19 12:21:28 +01001673
Chris Wilson05394f32010-11-08 19:18:58 +00001674 obj->base.write_domain = 0;
1675 list_del_init(&obj->gpu_write_list);
1676 i915_gem_object_move_to_inactive(obj);
Chris Wilson9375e442010-09-19 12:21:28 +01001677 }
Chris Wilson9375e442010-09-19 12:21:28 +01001678
Chris Wilsondfaae392010-09-22 10:31:52 +01001679 /* Move everything out of the GPU domains to ensure we do any
1680 * necessary invalidation upon reuse.
1681 */
Chris Wilson05394f32010-11-08 19:18:58 +00001682 list_for_each_entry(obj,
Chris Wilson77f01232010-09-19 12:31:36 +01001683 &dev_priv->mm.inactive_list,
Chris Wilson69dc4982010-10-19 10:36:51 +01001684 mm_list)
Chris Wilson77f01232010-09-19 12:31:36 +01001685 {
Chris Wilson05394f32010-11-08 19:18:58 +00001686 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
Chris Wilson77f01232010-09-19 12:31:36 +01001687 }
Chris Wilson069efc12010-09-30 16:53:18 +01001688
1689 /* The fence registers are invalidated so clear them out */
Chris Wilson312817a2010-11-22 11:50:11 +00001690 i915_gem_reset_fences(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07001691}
1692
1693/**
1694 * This function clears the request list as sequence numbers are passed.
1695 */
Chris Wilsona71d8d92012-02-15 11:25:36 +00001696void
Chris Wilsondb53a302011-02-03 11:57:46 +00001697i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001698{
Eric Anholt673a3942008-07-30 12:06:12 -07001699 uint32_t seqno;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001700 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07001701
Chris Wilsondb53a302011-02-03 11:57:46 +00001702 if (list_empty(&ring->request_list))
Karsten Wiese6c0594a2009-02-23 15:07:57 +01001703 return;
1704
Chris Wilsondb53a302011-02-03 11:57:46 +00001705 WARN_ON(i915_verify_lists(ring->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07001706
Chris Wilson78501ea2010-10-27 12:18:21 +01001707 seqno = ring->get_seqno(ring);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001708
Chris Wilson076e2c02011-01-21 10:07:18 +00001709 for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001710 if (seqno >= ring->sync_seqno[i])
1711 ring->sync_seqno[i] = 0;
1712
Zou Nan hai852835f2010-05-21 09:08:56 +08001713 while (!list_empty(&ring->request_list)) {
Eric Anholt673a3942008-07-30 12:06:12 -07001714 struct drm_i915_gem_request *request;
Eric Anholt673a3942008-07-30 12:06:12 -07001715
Zou Nan hai852835f2010-05-21 09:08:56 +08001716 request = list_first_entry(&ring->request_list,
Eric Anholt673a3942008-07-30 12:06:12 -07001717 struct drm_i915_gem_request,
1718 list);
Eric Anholt673a3942008-07-30 12:06:12 -07001719
Chris Wilsondfaae392010-09-22 10:31:52 +01001720 if (!i915_seqno_passed(seqno, request->seqno))
Eric Anholt673a3942008-07-30 12:06:12 -07001721 break;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001722
Chris Wilsondb53a302011-02-03 11:57:46 +00001723 trace_i915_gem_request_retire(ring, request->seqno);
Chris Wilsona71d8d92012-02-15 11:25:36 +00001724 /* We know the GPU must have read the request to have
1725 * sent us the seqno + interrupt, so use the position
1726 * of the tail of the request to update the last known position
1727 * of the GPU head.
1728 */
1729 ring->last_retired_head = request->tail;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001730
1731 list_del(&request->list);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001732 i915_gem_request_remove_from_client(request);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001733 kfree(request);
1734 }
1735
1736 /* Move any buffers on the active list that are no longer referenced
1737 * by the ringbuffer to the flushing/inactive lists as appropriate.
1738 */
1739 while (!list_empty(&ring->active_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00001740 struct drm_i915_gem_object *obj;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001741
Akshay Joshi0206e352011-08-16 15:34:10 -04001742 obj = list_first_entry(&ring->active_list,
Chris Wilson05394f32010-11-08 19:18:58 +00001743 struct drm_i915_gem_object,
1744 ring_list);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001745
Chris Wilson05394f32010-11-08 19:18:58 +00001746 if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001747 break;
1748
Chris Wilson05394f32010-11-08 19:18:58 +00001749 if (obj->base.write_domain != 0)
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001750 i915_gem_object_move_to_flushing(obj);
1751 else
1752 i915_gem_object_move_to_inactive(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001753 }
Chris Wilson9d34e5d2009-09-24 05:26:06 +01001754
Chris Wilsondb53a302011-02-03 11:57:46 +00001755 if (unlikely(ring->trace_irq_seqno &&
1756 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001757 ring->irq_put(ring);
Chris Wilsondb53a302011-02-03 11:57:46 +00001758 ring->trace_irq_seqno = 0;
Chris Wilson9d34e5d2009-09-24 05:26:06 +01001759 }
Chris Wilson23bc5982010-09-29 16:10:57 +01001760
Chris Wilsondb53a302011-02-03 11:57:46 +00001761 WARN_ON(i915_verify_lists(ring->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07001762}
1763
1764void
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001765i915_gem_retire_requests(struct drm_device *dev)
1766{
1767 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001768 int i;
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001769
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001770 for (i = 0; i < I915_NUM_RINGS; i++)
Chris Wilsondb53a302011-02-03 11:57:46 +00001771 i915_gem_retire_requests_ring(&dev_priv->ring[i]);
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001772}
1773
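/*
 * Periodic housekeeping run from the driver workqueue: retire completed
 * requests, flush rings that still have pending GEM writes, and re-arm
 * itself while the device remains busy.
 */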
Daniel Vetter75ef9da2010-08-21 00:25:16 +02001774static void
Eric Anholt673a3942008-07-30 12:06:12 -07001775i915_gem_retire_work_handler(struct work_struct *work)
1776{
1777 drm_i915_private_t *dev_priv;
1778 struct drm_device *dev;
Chris Wilson0a587052011-01-09 21:05:44 +00001779 bool idle;
1780 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07001781
1782 dev_priv = container_of(work, drm_i915_private_t,
1783 mm.retire_work.work);
1784 dev = dev_priv->dev;
1785
Chris Wilson891b48c2010-09-29 12:26:37 +01001786 /* Come back later if the device is busy... */
1787 if (!mutex_trylock(&dev->struct_mutex)) {
1788 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1789 return;
1790 }
1791
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001792 i915_gem_retire_requests(dev);
Zou Nan haid1b851f2010-05-21 09:08:57 +08001793
Chris Wilson0a587052011-01-09 21:05:44 +00001794 /* Send a periodic flush down the ring so we don't hold onto GEM
1795 * objects indefinitely.
1796 */
1797 idle = true;
1798 for (i = 0; i < I915_NUM_RINGS; i++) {
1799 struct intel_ring_buffer *ring = &dev_priv->ring[i];
1800
1801 if (!list_empty(&ring->gpu_write_list)) {
1802 struct drm_i915_gem_request *request;
1803 int ret;
1804
Chris Wilsondb53a302011-02-03 11:57:46 +00001805 ret = i915_gem_flush_ring(ring,
1806 0, I915_GEM_GPU_DOMAINS);
Chris Wilson0a587052011-01-09 21:05:44 +00001807 request = kzalloc(sizeof(*request), GFP_KERNEL);
1808 if (ret || request == NULL ||
Chris Wilsondb53a302011-02-03 11:57:46 +00001809 i915_add_request(ring, NULL, request))
Chris Wilson0a587052011-01-09 21:05:44 +00001810 kfree(request);
1811 }
1812
1813 idle &= list_empty(&ring->request_list);
1814 }
1815
1816 if (!dev_priv->mm.suspended && !idle)
Eric Anholt9c9fe1f2009-08-03 16:09:16 -07001817 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
Chris Wilson0a587052011-01-09 21:05:44 +00001818
Eric Anholt673a3942008-07-30 12:06:12 -07001819 mutex_unlock(&dev->struct_mutex);
1820}
1821
Chris Wilsondb53a302011-02-03 11:57:46 +00001822/**
1823 * Waits for a sequence number to be signaled, and cleans up the
1824 * request and object lists appropriately for that event.
1825 */
Daniel Vetter5a5a0c62009-09-15 22:57:36 +02001826int
Chris Wilsondb53a302011-02-03 11:57:46 +00001827i915_wait_request(struct intel_ring_buffer *ring,
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08001828 uint32_t seqno,
1829 bool do_retire)
Eric Anholt673a3942008-07-30 12:06:12 -07001830{
Chris Wilsondb53a302011-02-03 11:57:46 +00001831 drm_i915_private_t *dev_priv = ring->dev->dev_private;
Jesse Barnes802c7eb2009-05-05 16:03:48 -07001832 u32 ier;
Eric Anholt673a3942008-07-30 12:06:12 -07001833 int ret = 0;
1834
1835 BUG_ON(seqno == 0);
1836
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001837 if (atomic_read(&dev_priv->mm.wedged)) {
1838 struct completion *x = &dev_priv->error_completion;
1839 bool recovery_complete;
1840 unsigned long flags;
1841
1842 /* Give the error handler a chance to run. */
1843 spin_lock_irqsave(&x->wait.lock, flags);
1844 recovery_complete = x->done > 0;
1845 spin_unlock_irqrestore(&x->wait.lock, flags);
1846
1847 return recovery_complete ? -EIO : -EAGAIN;
1848 }
Ben Gamariffed1d02009-09-14 17:48:41 -04001849
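	/*
	 * If the caller is waiting on the ring's outstanding lazy request,
	 * no request has actually been emitted yet; emit one now so there
	 * is a real seqno (and hangcheck coverage) to wait on.
	 */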
Chris Wilson5d97eb62010-11-10 20:40:02 +00001850 if (seqno == ring->outstanding_lazy_request) {
Chris Wilson3cce4692010-10-27 16:11:02 +01001851 struct drm_i915_gem_request *request;
1852
1853 request = kzalloc(sizeof(*request), GFP_KERNEL);
1854 if (request == NULL)
Daniel Vettere35a41d2010-02-11 22:13:59 +01001855 return -ENOMEM;
Chris Wilson3cce4692010-10-27 16:11:02 +01001856
Chris Wilsondb53a302011-02-03 11:57:46 +00001857 ret = i915_add_request(ring, NULL, request);
Chris Wilson3cce4692010-10-27 16:11:02 +01001858 if (ret) {
1859 kfree(request);
1860 return ret;
1861 }
1862
1863 seqno = request->seqno;
Daniel Vettere35a41d2010-02-11 22:13:59 +01001864 }
1865
Chris Wilson78501ea2010-10-27 12:18:21 +01001866 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
Chris Wilsondb53a302011-02-03 11:57:46 +00001867 if (HAS_PCH_SPLIT(ring->dev))
Zhenyu Wang036a4a72009-06-08 14:40:19 +08001868 ier = I915_READ(DEIER) | I915_READ(GTIER);
Jesse Barnes23e3f9b2012-03-28 13:39:39 -07001869 else if (IS_VALLEYVIEW(ring->dev))
1870 ier = I915_READ(GTIER) | I915_READ(VLV_IER);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08001871 else
1872 ier = I915_READ(IER);
Jesse Barnes802c7eb2009-05-05 16:03:48 -07001873 if (!ier) {
1874 DRM_ERROR("something (likely vbetool) disabled "
1875 "interrupts, re-enabling\n");
Chris Wilsonf01c22f2011-06-28 11:48:51 +01001876 ring->dev->driver->irq_preinstall(ring->dev);
1877 ring->dev->driver->irq_postinstall(ring->dev);
Jesse Barnes802c7eb2009-05-05 16:03:48 -07001878 }
1879
Chris Wilsondb53a302011-02-03 11:57:46 +00001880 trace_i915_gem_request_wait_begin(ring, seqno);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001881
Chris Wilsonb2223492010-10-27 15:27:33 +01001882 ring->waiting_seqno = seqno;
Chris Wilsonb13c2b92010-12-13 16:54:50 +00001883 if (ring->irq_get(ring)) {
Chris Wilsonce453d82011-02-21 14:43:56 +00001884 if (dev_priv->mm.interruptible)
Chris Wilsonb13c2b92010-12-13 16:54:50 +00001885 ret = wait_event_interruptible(ring->irq_queue,
1886 i915_seqno_passed(ring->get_seqno(ring), seqno)
1887 || atomic_read(&dev_priv->mm.wedged));
1888 else
1889 wait_event(ring->irq_queue,
1890 i915_seqno_passed(ring->get_seqno(ring), seqno)
1891 || atomic_read(&dev_priv->mm.wedged));
Daniel Vetter48764bf2009-09-15 22:57:32 +02001892
Chris Wilsonb13c2b92010-12-13 16:54:50 +00001893 ring->irq_put(ring);
Eric Anholte959b5d2011-12-22 14:55:01 -08001894 } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
1895 seqno) ||
1896 atomic_read(&dev_priv->mm.wedged), 3000))
Chris Wilsonb5ba1772010-12-14 12:17:15 +00001897 ret = -EBUSY;
Chris Wilsonb2223492010-10-27 15:27:33 +01001898 ring->waiting_seqno = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001899
Chris Wilsondb53a302011-02-03 11:57:46 +00001900 trace_i915_gem_request_wait_end(ring, seqno);
Eric Anholt673a3942008-07-30 12:06:12 -07001901 }
Ben Gamariba1234d2009-09-14 17:48:47 -04001902 if (atomic_read(&dev_priv->mm.wedged))
Chris Wilson30dbf0c2010-09-25 10:19:17 +01001903 ret = -EAGAIN;
Eric Anholt673a3942008-07-30 12:06:12 -07001904
Eric Anholt673a3942008-07-30 12:06:12 -07001905 /* Directly dispatch request retiring. While we have the work queue
1906 * to handle this, the waiter on a request often wants an associated
1907 * buffer to have made it to the inactive list, and we would need
1908 * a separate wait queue to handle that.
1909 */
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08001910 if (ret == 0 && do_retire)
Chris Wilsondb53a302011-02-03 11:57:46 +00001911 i915_gem_retire_requests_ring(ring);
Eric Anholt673a3942008-07-30 12:06:12 -07001912
1913 return ret;
1914}
1915
Daniel Vetter48764bf2009-09-15 22:57:32 +02001916/**
Eric Anholt673a3942008-07-30 12:06:12 -07001917 * Ensures that all rendering to the object has completed and the object is
1918 * safe to unbind from the GTT or access from the CPU.
1919 */
Chris Wilson54cf91d2010-11-25 18:00:26 +00001920int
Chris Wilsonce453d82011-02-21 14:43:56 +00001921i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001922{
Eric Anholt673a3942008-07-30 12:06:12 -07001923 int ret;
1924
Eric Anholte47c68e2008-11-14 13:35:19 -08001925 /* This function only exists to support waiting for existing rendering,
1926 * not for emitting required flushes.
Eric Anholt673a3942008-07-30 12:06:12 -07001927 */
Chris Wilson05394f32010-11-08 19:18:58 +00001928 BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
Eric Anholt673a3942008-07-30 12:06:12 -07001929
1930 /* If there is rendering queued on the buffer being evicted, wait for
1931 * it.
1932 */
Chris Wilson05394f32010-11-08 19:18:58 +00001933 if (obj->active) {
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08001934 ret = i915_wait_request(obj->ring, obj->last_rendering_seqno,
1935 true);
Chris Wilson2cf34d72010-09-14 13:03:28 +01001936 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07001937 return ret;
1938 }
1939
1940 return 0;
1941}
1942
Ben Widawsky5816d642012-04-11 11:18:19 -07001943/**
1944 * i915_gem_object_sync - sync an object to a ring.
1945 *
1946 * @obj: object which may be in use on another ring.
1947 * @to: ring we wish to use the object on. May be NULL.
1948 *
1949 * This code is meant to abstract object synchronization with the GPU.
1950 * Calling with NULL implies synchronizing the object with the CPU
1951 * rather than a particular GPU ring.
1952 *
1953 * Returns 0 if successful, else propagates up the lower layer error.
1954 */
Ben Widawsky2911a352012-04-05 14:47:36 -07001955int
1956i915_gem_object_sync(struct drm_i915_gem_object *obj,
1957 struct intel_ring_buffer *to)
1958{
1959 struct intel_ring_buffer *from = obj->ring;
1960 u32 seqno;
1961 int ret, idx;
1962
1963 if (from == NULL || to == from)
1964 return 0;
1965
Ben Widawsky5816d642012-04-11 11:18:19 -07001966 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
Ben Widawsky2911a352012-04-05 14:47:36 -07001967 return i915_gem_object_wait_rendering(obj);
1968
1969 idx = intel_ring_sync_index(from, to);
1970
1971 seqno = obj->last_rendering_seqno;
1972 if (seqno <= from->sync_seqno[idx])
1973 return 0;
1974
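	/*
	 * As in i915_wait_request(), a lazy request has no seqno on the ring
	 * yet, so emit it before asking the target ring to wait on it.
	 */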
1975 if (seqno == from->outstanding_lazy_request) {
1976 struct drm_i915_gem_request *request;
1977
1978 request = kzalloc(sizeof(*request), GFP_KERNEL);
1979 if (request == NULL)
1980 return -ENOMEM;
1981
1982 ret = i915_add_request(from, NULL, request);
1983 if (ret) {
1984 kfree(request);
1985 return ret;
1986 }
1987
1988 seqno = request->seqno;
1989 }
1990
Ben Widawsky2911a352012-04-05 14:47:36 -07001991
Ben Widawsky1500f7e2012-04-11 11:18:21 -07001992 ret = to->sync_to(to, from, seqno);
Ben Widawskye3a5a222012-04-11 11:18:20 -07001993 if (!ret)
1994 from->sync_seqno[idx] = seqno;
Ben Widawsky2911a352012-04-05 14:47:36 -07001995
Ben Widawskye3a5a222012-04-11 11:18:20 -07001996 return ret;
Ben Widawsky2911a352012-04-05 14:47:36 -07001997}
1998
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01001999static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2000{
2001 u32 old_write_domain, old_read_domains;
2002
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002003 /* Act a barrier for all accesses through the GTT */
2004 mb();
2005
2006 /* Force a pagefault for domain tracking on next user access */
2007 i915_gem_release_mmap(obj);
2008
Keith Packardb97c3d92011-06-24 21:02:59 -07002009 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2010 return;
2011
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002012 old_read_domains = obj->base.read_domains;
2013 old_write_domain = obj->base.write_domain;
2014
2015 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2016 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2017
2018 trace_i915_gem_object_change_domain(obj,
2019 old_read_domains,
2020 old_write_domain);
2021}
2022
Eric Anholt673a3942008-07-30 12:06:12 -07002023/**
2024 * Unbinds an object from the GTT aperture.
2025 */
Jesse Barnes0f973f22009-01-26 17:10:45 -08002026int
Chris Wilson05394f32010-11-08 19:18:58 +00002027i915_gem_object_unbind(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07002028{
Daniel Vetter7bddb012012-02-09 17:15:47 +01002029 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07002030 int ret = 0;
2031
Chris Wilson05394f32010-11-08 19:18:58 +00002032 if (obj->gtt_space == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07002033 return 0;
2034
Chris Wilson05394f32010-11-08 19:18:58 +00002035 if (obj->pin_count != 0) {
Eric Anholt673a3942008-07-30 12:06:12 -07002036 DRM_ERROR("Attempting to unbind pinned buffer\n");
2037 return -EINVAL;
2038 }
2039
Chris Wilsona8198ee2011-04-13 22:04:09 +01002040 ret = i915_gem_object_finish_gpu(obj);
Chris Wilson1488fc02012-04-24 15:47:31 +01002041 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07002042 return ret;
Chris Wilson8dc17752010-07-23 23:18:51 +01002043 /* Continue on if we fail due to EIO: the GPU is hung, so we
2044 * should be safe, and we need to clean up or else we might
2045 * cause memory corruption through use-after-free.
2046 */
Chris Wilsona8198ee2011-04-13 22:04:09 +01002047
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002048 i915_gem_object_finish_gtt(obj);
Chris Wilsona8198ee2011-04-13 22:04:09 +01002049
2050 /* Move the object to the CPU domain to ensure that
2051 * any possible CPU writes while it's not in the GTT
2052 * are flushed when we go to remap it.
2053 */
2054 if (ret == 0)
2055 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
2056 if (ret == -ERESTARTSYS)
2057 return ret;
Chris Wilson812ed4922010-09-30 15:08:57 +01002058 if (ret) {
Chris Wilsona8198ee2011-04-13 22:04:09 +01002059 /* In the event of a disaster, abandon all caches and
2060 * hope for the best.
2061 */
Chris Wilson812ed4922010-09-30 15:08:57 +01002062 i915_gem_clflush_object(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00002063 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
Chris Wilson812ed4922010-09-30 15:08:57 +01002064 }
Eric Anholt673a3942008-07-30 12:06:12 -07002065
Daniel Vetter96b47b62009-12-15 17:50:00 +01002066 /* release the fence reg _after_ flushing */
Chris Wilsond9e86c02010-11-10 16:40:20 +00002067 ret = i915_gem_object_put_fence(obj);
Chris Wilson1488fc02012-04-24 15:47:31 +01002068 if (ret)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002069 return ret;
Daniel Vetter96b47b62009-12-15 17:50:00 +01002070
Chris Wilsondb53a302011-02-03 11:57:46 +00002071 trace_i915_gem_object_unbind(obj);
2072
Daniel Vetter74898d72012-02-15 23:50:22 +01002073 if (obj->has_global_gtt_mapping)
2074 i915_gem_gtt_unbind_object(obj);
Daniel Vetter7bddb012012-02-09 17:15:47 +01002075 if (obj->has_aliasing_ppgtt_mapping) {
2076 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2077 obj->has_aliasing_ppgtt_mapping = 0;
2078 }
Daniel Vetter74163902012-02-15 23:50:21 +01002079 i915_gem_gtt_finish_object(obj);
Daniel Vetter7bddb012012-02-09 17:15:47 +01002080
Chris Wilsone5281cc2010-10-28 13:45:36 +01002081 i915_gem_object_put_pages_gtt(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002082
Chris Wilson6299f992010-11-24 12:23:44 +00002083 list_del_init(&obj->gtt_list);
Chris Wilson05394f32010-11-08 19:18:58 +00002084 list_del_init(&obj->mm_list);
Daniel Vetter75e9e912010-11-04 17:11:09 +01002085 /* Avoid an unnecessary call to unbind on rebind. */
Chris Wilson05394f32010-11-08 19:18:58 +00002086 obj->map_and_fenceable = true;
Eric Anholt673a3942008-07-30 12:06:12 -07002087
Chris Wilson05394f32010-11-08 19:18:58 +00002088 drm_mm_put_block(obj->gtt_space);
2089 obj->gtt_space = NULL;
2090 obj->gtt_offset = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07002091
Chris Wilson05394f32010-11-08 19:18:58 +00002092 if (i915_gem_object_is_purgeable(obj))
Chris Wilson963b4832009-09-20 23:03:54 +01002093 i915_gem_object_truncate(obj);
2094
Chris Wilson8dc17752010-07-23 23:18:51 +01002095 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002096}
2097
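/*
 * Emit a flush on the ring for the given domains and, if any GPU write
 * domains were flushed, move the affected buffers from the ring's
 * gpu_write_list onto the active list with a fresh seqno.
 */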
Chris Wilson88241782011-01-07 17:09:48 +00002098int
Chris Wilsondb53a302011-02-03 11:57:46 +00002099i915_gem_flush_ring(struct intel_ring_buffer *ring,
Chris Wilson54cf91d2010-11-25 18:00:26 +00002100 uint32_t invalidate_domains,
2101 uint32_t flush_domains)
2102{
Chris Wilson88241782011-01-07 17:09:48 +00002103 int ret;
2104
Chris Wilson36d527d2011-03-19 22:26:49 +00002105 if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
2106 return 0;
2107
Chris Wilsondb53a302011-02-03 11:57:46 +00002108 trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
2109
Chris Wilson88241782011-01-07 17:09:48 +00002110 ret = ring->flush(ring, invalidate_domains, flush_domains);
2111 if (ret)
2112 return ret;
2113
Chris Wilson36d527d2011-03-19 22:26:49 +00002114 if (flush_domains & I915_GEM_GPU_DOMAINS)
2115 i915_gem_process_flushing_list(ring, flush_domains);
2116
Chris Wilson88241782011-01-07 17:09:48 +00002117 return 0;
Chris Wilson54cf91d2010-11-25 18:00:26 +00002118}
2119
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08002120static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
Chris Wilsona56ba562010-09-28 10:07:56 +01002121{
Chris Wilson88241782011-01-07 17:09:48 +00002122 int ret;
2123
Chris Wilson395b70b2010-10-28 21:28:46 +01002124 if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
Chris Wilson64193402010-10-24 12:38:05 +01002125 return 0;
2126
Chris Wilson88241782011-01-07 17:09:48 +00002127 if (!list_empty(&ring->gpu_write_list)) {
Chris Wilsondb53a302011-02-03 11:57:46 +00002128 ret = i915_gem_flush_ring(ring,
Chris Wilson0ac74c62010-12-06 14:36:02 +00002129 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
Chris Wilson88241782011-01-07 17:09:48 +00002130 if (ret)
2131 return ret;
2132 }
2133
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08002134 return i915_wait_request(ring, i915_gem_next_request_seqno(ring),
2135 do_retire);
Chris Wilsona56ba562010-09-28 10:07:56 +01002136}
2137
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08002138int i915_gpu_idle(struct drm_device *dev, bool do_retire)
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002139{
2140 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002141 int ret, i;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002142
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002143 /* Flush everything onto the inactive list. */
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002144 for (i = 0; i < I915_NUM_RINGS; i++) {
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08002145 ret = i915_ring_idle(&dev_priv->ring[i], do_retire);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002146 if (ret)
2147 return ret;
2148 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08002149
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01002150 return 0;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002151}
2152
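/*
 * Gen6+ fence layout, as encoded below: the upper dword holds the end
 * address of the fenced range (4KiB granularity), the lower dword the
 * start address, the pitch is programmed in 128-byte strides, plus a
 * Y-tiling select bit and the valid bit.
 */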
Chris Wilson9ce079e2012-04-17 15:31:30 +01002153static void sandybridge_write_fence_reg(struct drm_device *dev, int reg,
2154 struct drm_i915_gem_object *obj)
Eric Anholt4e901fd2009-10-26 16:44:17 -07002155{
Eric Anholt4e901fd2009-10-26 16:44:17 -07002156 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt4e901fd2009-10-26 16:44:17 -07002157 uint64_t val;
2158
Chris Wilson9ce079e2012-04-17 15:31:30 +01002159 if (obj) {
2160 u32 size = obj->gtt_space->size;
Eric Anholt4e901fd2009-10-26 16:44:17 -07002161
Chris Wilson9ce079e2012-04-17 15:31:30 +01002162 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2163 0xfffff000) << 32;
2164 val |= obj->gtt_offset & 0xfffff000;
2165 val |= (uint64_t)((obj->stride / 128) - 1) <<
2166 SANDYBRIDGE_FENCE_PITCH_SHIFT;
Eric Anholt4e901fd2009-10-26 16:44:17 -07002167
Chris Wilson9ce079e2012-04-17 15:31:30 +01002168 if (obj->tiling_mode == I915_TILING_Y)
2169 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2170 val |= I965_FENCE_REG_VALID;
2171 } else
2172 val = 0;
Daniel Vetterc6642782010-11-12 13:46:18 +00002173
Chris Wilson9ce079e2012-04-17 15:31:30 +01002174 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
2175 POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
Eric Anholt4e901fd2009-10-26 16:44:17 -07002176}
2177
Chris Wilson9ce079e2012-04-17 15:31:30 +01002178static void i965_write_fence_reg(struct drm_device *dev, int reg,
2179 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002180{
Jesse Barnesde151cf2008-11-12 10:03:55 -08002181 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002182 uint64_t val;
2183
Chris Wilson9ce079e2012-04-17 15:31:30 +01002184 if (obj) {
2185 u32 size = obj->gtt_space->size;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002186
Chris Wilson9ce079e2012-04-17 15:31:30 +01002187 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2188 0xfffff000) << 32;
2189 val |= obj->gtt_offset & 0xfffff000;
2190 val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2191 if (obj->tiling_mode == I915_TILING_Y)
2192 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2193 val |= I965_FENCE_REG_VALID;
2194 } else
2195 val = 0;
Daniel Vetterc6642782010-11-12 13:46:18 +00002196
Chris Wilson9ce079e2012-04-17 15:31:30 +01002197 I915_WRITE64(FENCE_REG_965_0 + reg * 8, val);
2198 POSTING_READ(FENCE_REG_965_0 + reg * 8);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002199}
2200
Chris Wilson9ce079e2012-04-17 15:31:30 +01002201static void i915_write_fence_reg(struct drm_device *dev, int reg,
2202 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002203{
Jesse Barnesde151cf2008-11-12 10:03:55 -08002204 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson9ce079e2012-04-17 15:31:30 +01002205 u32 val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002206
Chris Wilson9ce079e2012-04-17 15:31:30 +01002207 if (obj) {
2208 u32 size = obj->gtt_space->size;
2209 int pitch_val;
2210 int tile_width;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002211
Chris Wilson9ce079e2012-04-17 15:31:30 +01002212 WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2213 (size & -size) != size ||
2214 (obj->gtt_offset & (size - 1)),
2215 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2216 obj->gtt_offset, obj->map_and_fenceable, size);
2217
2218 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2219 tile_width = 128;
2220 else
2221 tile_width = 512;
2222
2223 /* Note: pitch better be a power of two tile widths */
2224 pitch_val = obj->stride / tile_width;
2225 pitch_val = ffs(pitch_val) - 1;
2226
2227 val = obj->gtt_offset;
2228 if (obj->tiling_mode == I915_TILING_Y)
2229 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2230 val |= I915_FENCE_SIZE_BITS(size);
2231 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2232 val |= I830_FENCE_REG_VALID;
2233 } else
2234 val = 0;
2235
2236 if (reg < 8)
2237 reg = FENCE_REG_830_0 + reg * 4;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002238 else
Chris Wilson9ce079e2012-04-17 15:31:30 +01002239 reg = FENCE_REG_945_8 + (reg - 8) * 4;
Jesse Barnes0f973f22009-01-26 17:10:45 -08002240
Chris Wilson9ce079e2012-04-17 15:31:30 +01002241 I915_WRITE(reg, val);
2242 POSTING_READ(reg);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002243}
2244
Chris Wilson9ce079e2012-04-17 15:31:30 +01002245static void i830_write_fence_reg(struct drm_device *dev, int reg,
2246 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002247{
Jesse Barnesde151cf2008-11-12 10:03:55 -08002248 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002249 uint32_t val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002250
Chris Wilson9ce079e2012-04-17 15:31:30 +01002251 if (obj) {
2252 u32 size = obj->gtt_space->size;
2253 uint32_t pitch_val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002254
Chris Wilson9ce079e2012-04-17 15:31:30 +01002255 WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2256 (size & -size) != size ||
2257 (obj->gtt_offset & (size - 1)),
2258 "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2259 obj->gtt_offset, size);
Eric Anholte76a16d2009-05-26 17:44:56 -07002260
Chris Wilson9ce079e2012-04-17 15:31:30 +01002261 pitch_val = obj->stride / 128;
2262 pitch_val = ffs(pitch_val) - 1;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002263
Chris Wilson9ce079e2012-04-17 15:31:30 +01002264 val = obj->gtt_offset;
2265 if (obj->tiling_mode == I915_TILING_Y)
2266 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2267 val |= I830_FENCE_SIZE_BITS(size);
2268 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2269 val |= I830_FENCE_REG_VALID;
2270 } else
2271 val = 0;
Daniel Vetterc6642782010-11-12 13:46:18 +00002272
Chris Wilson9ce079e2012-04-17 15:31:30 +01002273 I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2274 POSTING_READ(FENCE_REG_830_0 + reg * 4);
2275}
2276
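/*
 * Write a fence register in the format required by the hardware
 * generation; passing a NULL object clears the register and disables the
 * fence.
 */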
2277static void i915_gem_write_fence(struct drm_device *dev, int reg,
2278 struct drm_i915_gem_object *obj)
2279{
2280 switch (INTEL_INFO(dev)->gen) {
2281 case 7:
2282 case 6: sandybridge_write_fence_reg(dev, reg, obj); break;
2283 case 5:
2284 case 4: i965_write_fence_reg(dev, reg, obj); break;
2285 case 3: i915_write_fence_reg(dev, reg, obj); break;
2286 case 2: i830_write_fence_reg(dev, reg, obj); break;
2287 default: break;
2288 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08002289}
2290
Chris Wilson61050802012-04-17 15:31:31 +01002291static inline int fence_number(struct drm_i915_private *dev_priv,
2292 struct drm_i915_fence_reg *fence)
2293{
2294 return fence - dev_priv->fence_regs;
2295}
2296
2297static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2298 struct drm_i915_fence_reg *fence,
2299 bool enable)
2300{
2301 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2302 int reg = fence_number(dev_priv, fence);
2303
2304 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
2305
2306 if (enable) {
2307 obj->fence_reg = reg;
2308 fence->obj = obj;
2309 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2310 } else {
2311 obj->fence_reg = I915_FENCE_REG_NONE;
2312 fence->obj = NULL;
2313 list_del_init(&fence->lru_list);
2314 }
2315}
2316
Chris Wilsond9e86c02010-11-10 16:40:20 +00002317static int
Chris Wilsona360bb12012-04-17 15:31:25 +01002318i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002319{
2320 int ret;
2321
2322 if (obj->fenced_gpu_access) {
Chris Wilson88241782011-01-07 17:09:48 +00002323 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
Chris Wilson1c293ea2012-04-17 15:31:27 +01002324 ret = i915_gem_flush_ring(obj->ring,
Chris Wilson88241782011-01-07 17:09:48 +00002325 0, obj->base.write_domain);
2326 if (ret)
2327 return ret;
2328 }
Chris Wilsond9e86c02010-11-10 16:40:20 +00002329
2330 obj->fenced_gpu_access = false;
2331 }
2332
Chris Wilson1c293ea2012-04-17 15:31:27 +01002333 if (obj->last_fenced_seqno) {
Chris Wilson18991842012-04-17 15:31:29 +01002334 ret = i915_wait_request(obj->ring,
2335 obj->last_fenced_seqno,
Chris Wilson14415742012-04-17 15:31:33 +01002336 false);
Chris Wilson18991842012-04-17 15:31:29 +01002337 if (ret)
2338 return ret;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002339
2340 obj->last_fenced_seqno = 0;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002341 }
2342
Chris Wilson63256ec2011-01-04 18:42:07 +00002343 /* Ensure that all CPU reads are completed before installing a fence
2344 * and all writes before removing the fence.
2345 */
2346 if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
2347 mb();
2348
Chris Wilsond9e86c02010-11-10 16:40:20 +00002349 return 0;
2350}
2351
2352int
2353i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2354{
Chris Wilson61050802012-04-17 15:31:31 +01002355 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002356 int ret;
2357
Chris Wilsona360bb12012-04-17 15:31:25 +01002358 ret = i915_gem_object_flush_fence(obj);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002359 if (ret)
2360 return ret;
2361
Chris Wilson61050802012-04-17 15:31:31 +01002362 if (obj->fence_reg == I915_FENCE_REG_NONE)
2363 return 0;
Chris Wilson1690e1e2011-12-14 13:57:08 +01002364
Chris Wilson61050802012-04-17 15:31:31 +01002365 i915_gem_object_update_fence(obj,
2366 &dev_priv->fence_regs[obj->fence_reg],
2367 false);
2368 i915_gem_object_fence_lost(obj);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002369
2370 return 0;
2371}
2372
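/*
 * Fence allocation policy: prefer a register with no object attached;
 * otherwise steal the least-recently-used register that is not pinned.
 * If every register is pinned, NULL is returned and the caller
 * (i915_gem_object_get_fence) reports -EDEADLK.
 */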
2373static struct drm_i915_fence_reg *
Chris Wilsona360bb12012-04-17 15:31:25 +01002374i915_find_fence_reg(struct drm_device *dev)
Daniel Vetterae3db242010-02-19 11:51:58 +01002375{
Daniel Vetterae3db242010-02-19 11:51:58 +01002376 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson8fe301a2012-04-17 15:31:28 +01002377 struct drm_i915_fence_reg *reg, *avail;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002378 int i;
Daniel Vetterae3db242010-02-19 11:51:58 +01002379
2380 /* First try to find a free reg */
Chris Wilsond9e86c02010-11-10 16:40:20 +00002381 avail = NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01002382 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2383 reg = &dev_priv->fence_regs[i];
2384 if (!reg->obj)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002385 return reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01002386
Chris Wilson1690e1e2011-12-14 13:57:08 +01002387 if (!reg->pin_count)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002388 avail = reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01002389 }
2390
Chris Wilsond9e86c02010-11-10 16:40:20 +00002391 if (avail == NULL)
2392 return NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01002393
2394 /* None available, try to steal one or wait for a user to finish */
Chris Wilsond9e86c02010-11-10 16:40:20 +00002395 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
Chris Wilson1690e1e2011-12-14 13:57:08 +01002396 if (reg->pin_count)
Daniel Vetterae3db242010-02-19 11:51:58 +01002397 continue;
2398
Chris Wilson8fe301a2012-04-17 15:31:28 +01002399 return reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01002400 }
2401
Chris Wilson8fe301a2012-04-17 15:31:28 +01002402 return NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01002403}
2404
Jesse Barnesde151cf2008-11-12 10:03:55 -08002405/**
Chris Wilson9a5a53b2012-03-22 15:10:00 +00002406 * i915_gem_object_get_fence - set up fencing for an object
Jesse Barnesde151cf2008-11-12 10:03:55 -08002407 * @obj: object to map through a fence reg
2408 *
2409 * When mapping objects through the GTT, userspace wants to be able to write
2410 * to them without having to worry about swizzling if the object is tiled.
Jesse Barnesde151cf2008-11-12 10:03:55 -08002411 * This function walks the fence regs looking for a free one for @obj,
2412 * stealing one if it can't find any.
2413 *
2414 * It then sets up the reg based on the object's properties: address, pitch
2415 * and tiling format.
Chris Wilson9a5a53b2012-03-22 15:10:00 +00002416 *
2417 * For an untiled surface, this removes any existing fence.
Jesse Barnesde151cf2008-11-12 10:03:55 -08002418 */
Chris Wilson8c4b8c32009-06-17 22:08:52 +01002419int
Chris Wilson06d98132012-04-17 15:31:24 +01002420i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002421{
Chris Wilson05394f32010-11-08 19:18:58 +00002422 struct drm_device *dev = obj->base.dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08002423 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson14415742012-04-17 15:31:33 +01002424 bool enable = obj->tiling_mode != I915_TILING_NONE;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002425 struct drm_i915_fence_reg *reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01002426 int ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002427
Chris Wilson14415742012-04-17 15:31:33 +01002428 /* Have we updated the tiling parameters for the object, such that we
2429 * now need to serialise the write to the associated fence register?
2430 */
Chris Wilson5d82e3e2012-04-21 16:23:23 +01002431 if (obj->fence_dirty) {
Chris Wilson14415742012-04-17 15:31:33 +01002432 ret = i915_gem_object_flush_fence(obj);
2433 if (ret)
2434 return ret;
2435 }
Chris Wilson9a5a53b2012-03-22 15:10:00 +00002436
Chris Wilsond9e86c02010-11-10 16:40:20 +00002437 /* Just update our place in the LRU if our fence is getting reused. */
Chris Wilson05394f32010-11-08 19:18:58 +00002438 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2439 reg = &dev_priv->fence_regs[obj->fence_reg];
Chris Wilson5d82e3e2012-04-21 16:23:23 +01002440 if (!obj->fence_dirty) {
Chris Wilson14415742012-04-17 15:31:33 +01002441 list_move_tail(&reg->lru_list,
2442 &dev_priv->mm.fence_list);
2443 return 0;
2444 }
2445 } else if (enable) {
2446 reg = i915_find_fence_reg(dev);
2447 if (reg == NULL)
2448 return -EDEADLK;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002449
Chris Wilson14415742012-04-17 15:31:33 +01002450 if (reg->obj) {
2451 struct drm_i915_gem_object *old = reg->obj;
2452
2453 ret = i915_gem_object_flush_fence(old);
Chris Wilson29c5a582011-03-17 15:23:22 +00002454 if (ret)
2455 return ret;
2456
Chris Wilson14415742012-04-17 15:31:33 +01002457 i915_gem_object_fence_lost(old);
Chris Wilson29c5a582011-03-17 15:23:22 +00002458 }
Chris Wilson14415742012-04-17 15:31:33 +01002459 } else
Eric Anholta09ba7f2009-08-29 12:49:51 -07002460 return 0;
Eric Anholta09ba7f2009-08-29 12:49:51 -07002461
Chris Wilson14415742012-04-17 15:31:33 +01002462 i915_gem_object_update_fence(obj, reg, enable);
Chris Wilson5d82e3e2012-04-21 16:23:23 +01002463 obj->fence_dirty = false;
Chris Wilson14415742012-04-17 15:31:33 +01002464
Chris Wilson9ce079e2012-04-17 15:31:30 +01002465 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002466}
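/*
 * Illustrative usage sketch for the pin/fence interfaces above (not
 * taken from this file), assuming struct_mutex is already held:
 *
 *	ret = i915_gem_object_pin(obj, 0, true);
 *	if (ret == 0) {
 *		ret = i915_gem_object_get_fence(obj);
 *		... access the object through its fenced GTT mapping ...
 *		i915_gem_object_unpin(obj);
 *	}
 */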
2467
2468/**
Eric Anholt673a3942008-07-30 12:06:12 -07002469 * Finds free space in the GTT aperture and binds the object there.
2470 */
2471static int
Chris Wilson05394f32010-11-08 19:18:58 +00002472i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
Daniel Vetter920afa72010-09-16 17:54:23 +02002473 unsigned alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01002474 bool map_and_fenceable)
Eric Anholt673a3942008-07-30 12:06:12 -07002475{
Chris Wilson05394f32010-11-08 19:18:58 +00002476 struct drm_device *dev = obj->base.dev;
Eric Anholt673a3942008-07-30 12:06:12 -07002477 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07002478 struct drm_mm_node *free_space;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002479 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
Daniel Vetter5e783302010-11-14 22:32:36 +01002480 u32 size, fence_size, fence_alignment, unfenced_alignment;
Daniel Vetter75e9e912010-11-04 17:11:09 +01002481 bool mappable, fenceable;
Chris Wilson07f73f62009-09-14 16:50:30 +01002482 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002483
Chris Wilson05394f32010-11-08 19:18:58 +00002484 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilson3ef94da2009-09-14 16:50:29 +01002485 DRM_ERROR("Attempting to bind a purgeable object\n");
2486 return -EINVAL;
2487 }
2488
Chris Wilsone28f8712011-07-18 13:11:49 -07002489 fence_size = i915_gem_get_gtt_size(dev,
2490 obj->base.size,
2491 obj->tiling_mode);
2492 fence_alignment = i915_gem_get_gtt_alignment(dev,
2493 obj->base.size,
2494 obj->tiling_mode);
2495 unfenced_alignment =
2496 i915_gem_get_unfenced_gtt_alignment(dev,
2497 obj->base.size,
2498 obj->tiling_mode);
Chris Wilsona00b10c2010-09-24 21:15:47 +01002499
Eric Anholt673a3942008-07-30 12:06:12 -07002500 if (alignment == 0)
Daniel Vetter5e783302010-11-14 22:32:36 +01002501 alignment = map_and_fenceable ? fence_alignment :
2502 unfenced_alignment;
Daniel Vetter75e9e912010-11-04 17:11:09 +01002503 if (map_and_fenceable && alignment & (fence_alignment - 1)) {
Eric Anholt673a3942008-07-30 12:06:12 -07002504 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2505 return -EINVAL;
2506 }
2507
Chris Wilson05394f32010-11-08 19:18:58 +00002508 size = map_and_fenceable ? fence_size : obj->base.size;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002509
Chris Wilson654fc602010-05-27 13:18:21 +01002510 /* If the object is bigger than the entire aperture, reject it early
2511 * before evicting everything in a vain attempt to find space.
2512 */
Chris Wilson05394f32010-11-08 19:18:58 +00002513 if (obj->base.size >
Daniel Vetter75e9e912010-11-04 17:11:09 +01002514 (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
Chris Wilson654fc602010-05-27 13:18:21 +01002515 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2516 return -E2BIG;
2517 }
2518
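 /*
 * Binding is retried from the search_free label: if no suitable GTT
 * hole is found we evict something and search again, and if the
 * backing pages cannot be allocated we first reclaim by clearing the
 * GTT and, failing that, drop the __GFP_NORETRY | __GFP_NOWARN mask
 * before one final attempt.
 */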
Eric Anholt673a3942008-07-30 12:06:12 -07002519 search_free:
Daniel Vetter75e9e912010-11-04 17:11:09 +01002520 if (map_and_fenceable)
Daniel Vetter920afa72010-09-16 17:54:23 +02002521 free_space =
2522 drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
Chris Wilsona00b10c2010-09-24 21:15:47 +01002523 size, alignment, 0,
Daniel Vetter920afa72010-09-16 17:54:23 +02002524 dev_priv->mm.gtt_mappable_end,
2525 0);
2526 else
2527 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
Chris Wilsona00b10c2010-09-24 21:15:47 +01002528 size, alignment, 0);
Daniel Vetter920afa72010-09-16 17:54:23 +02002529
2530 if (free_space != NULL) {
Daniel Vetter75e9e912010-11-04 17:11:09 +01002531 if (map_and_fenceable)
Chris Wilson05394f32010-11-08 19:18:58 +00002532 obj->gtt_space =
Daniel Vetter920afa72010-09-16 17:54:23 +02002533 drm_mm_get_block_range_generic(free_space,
Chris Wilsona00b10c2010-09-24 21:15:47 +01002534 size, alignment, 0,
Daniel Vetter920afa72010-09-16 17:54:23 +02002535 dev_priv->mm.gtt_mappable_end,
2536 0);
2537 else
Chris Wilson05394f32010-11-08 19:18:58 +00002538 obj->gtt_space =
Chris Wilsona00b10c2010-09-24 21:15:47 +01002539 drm_mm_get_block(free_space, size, alignment);
Daniel Vetter920afa72010-09-16 17:54:23 +02002540 }
Chris Wilson05394f32010-11-08 19:18:58 +00002541 if (obj->gtt_space == NULL) {
Eric Anholt673a3942008-07-30 12:06:12 -07002542 /* If the gtt is empty and we're still having trouble
2543 * fitting our object in, we're out of memory.
2544 */
Daniel Vetter75e9e912010-11-04 17:11:09 +01002545 ret = i915_gem_evict_something(dev, size, alignment,
2546 map_and_fenceable);
Chris Wilson97311292009-09-21 00:22:34 +01002547 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07002548 return ret;
Chris Wilson97311292009-09-21 00:22:34 +01002549
Eric Anholt673a3942008-07-30 12:06:12 -07002550 goto search_free;
2551 }
2552
Chris Wilsone5281cc2010-10-28 13:45:36 +01002553 ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
Eric Anholt673a3942008-07-30 12:06:12 -07002554 if (ret) {
Chris Wilson05394f32010-11-08 19:18:58 +00002555 drm_mm_put_block(obj->gtt_space);
2556 obj->gtt_space = NULL;
Chris Wilson07f73f62009-09-14 16:50:30 +01002557
2558 if (ret == -ENOMEM) {
Chris Wilson809b6332011-01-10 17:33:15 +00002559 /* first try to reclaim some memory by clearing the GTT */
2560 ret = i915_gem_evict_everything(dev, false);
Chris Wilson07f73f62009-09-14 16:50:30 +01002561 if (ret) {
Chris Wilson07f73f62009-09-14 16:50:30 +01002562 /* now try to shrink everyone else */
Chris Wilson4bdadb92010-01-27 13:36:32 +00002563 if (gfpmask) {
2564 gfpmask = 0;
2565 goto search_free;
Chris Wilson07f73f62009-09-14 16:50:30 +01002566 }
2567
Chris Wilson809b6332011-01-10 17:33:15 +00002568 return -ENOMEM;
Chris Wilson07f73f62009-09-14 16:50:30 +01002569 }
2570
2571 goto search_free;
2572 }
2573
Eric Anholt673a3942008-07-30 12:06:12 -07002574 return ret;
2575 }
2576
Daniel Vetter74163902012-02-15 23:50:21 +01002577 ret = i915_gem_gtt_prepare_object(obj);
Daniel Vetter7c2e6fd2010-11-06 10:10:47 +01002578 if (ret) {
Chris Wilsone5281cc2010-10-28 13:45:36 +01002579 i915_gem_object_put_pages_gtt(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00002580 drm_mm_put_block(obj->gtt_space);
2581 obj->gtt_space = NULL;
Chris Wilson07f73f62009-09-14 16:50:30 +01002582
Chris Wilson809b6332011-01-10 17:33:15 +00002583 if (i915_gem_evict_everything(dev, false))
Chris Wilson07f73f62009-09-14 16:50:30 +01002584 return ret;
Chris Wilson07f73f62009-09-14 16:50:30 +01002585
2586 goto search_free;
Eric Anholt673a3942008-07-30 12:06:12 -07002587 }
Eric Anholt673a3942008-07-30 12:06:12 -07002588
Daniel Vetter0ebb9822012-02-15 23:50:24 +01002589 if (!dev_priv->mm.aliasing_ppgtt)
2590 i915_gem_gtt_bind_object(obj, obj->cache_level);
Eric Anholt673a3942008-07-30 12:06:12 -07002591
Chris Wilson6299f992010-11-24 12:23:44 +00002592 list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
Chris Wilson05394f32010-11-08 19:18:58 +00002593 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
Chris Wilsonbf1a1092010-08-07 11:01:20 +01002594
Eric Anholt673a3942008-07-30 12:06:12 -07002595 /* Assert that the object is not currently in any GPU domain. As it
2596 * wasn't in the GTT, there shouldn't be any way it could have been in
2597 * a GPU cache
2598 */
Chris Wilson05394f32010-11-08 19:18:58 +00002599 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2600 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
Eric Anholt673a3942008-07-30 12:06:12 -07002601
Chris Wilson6299f992010-11-24 12:23:44 +00002602 obj->gtt_offset = obj->gtt_space->start;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002603
Daniel Vetter75e9e912010-11-04 17:11:09 +01002604 fenceable =
Chris Wilson05394f32010-11-08 19:18:58 +00002605 obj->gtt_space->size == fence_size &&
Akshay Joshi0206e352011-08-16 15:34:10 -04002606 (obj->gtt_space->start & (fence_alignment - 1)) == 0;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002607
Daniel Vetter75e9e912010-11-04 17:11:09 +01002608 mappable =
Chris Wilson05394f32010-11-08 19:18:58 +00002609 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002610
Chris Wilson05394f32010-11-08 19:18:58 +00002611 obj->map_and_fenceable = mappable && fenceable;
Daniel Vetter75e9e912010-11-04 17:11:09 +01002612
Chris Wilsondb53a302011-02-03 11:57:46 +00002613 trace_i915_gem_object_bind(obj, map_and_fenceable);
Eric Anholt673a3942008-07-30 12:06:12 -07002614 return 0;
2615}
2616
2617void
Chris Wilson05394f32010-11-08 19:18:58 +00002618i915_gem_clflush_object(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07002619{
Eric Anholt673a3942008-07-30 12:06:12 -07002620 /* If we don't have a page list set up, then we're not pinned
2621 * to GPU, and we can ignore the cache flush because it'll happen
2622 * again at bind time.
2623 */
Chris Wilson05394f32010-11-08 19:18:58 +00002624 if (obj->pages == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07002625 return;
2626
Chris Wilson9c23f7f2011-03-29 16:59:52 -07002627 /* If the GPU is snooping the contents of the CPU cache,
2628 * we do not need to manually clear the CPU cache lines. However,
2629 * the caches are only snooped when the render cache is
2630 * flushed/invalidated. As we always have to emit invalidations
2631 * and flushes when moving into and out of the RENDER domain, correct
2632 * snooping behaviour occurs naturally as the result of our domain
2633 * tracking.
2634 */
2635 if (obj->cache_level != I915_CACHE_NONE)
2636 return;
2637
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002638 trace_i915_gem_object_clflush(obj);
Eric Anholtcfa16a02009-05-26 18:46:16 -07002639
Chris Wilson05394f32010-11-08 19:18:58 +00002640 drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
Eric Anholt673a3942008-07-30 12:06:12 -07002641}
2642
Eric Anholte47c68e2008-11-14 13:35:19 -08002643/** Flushes any GPU write domain for the object if it's dirty. */
Chris Wilson88241782011-01-07 17:09:48 +00002644static int
Chris Wilson3619df02010-11-28 15:37:17 +00002645i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08002646{
Chris Wilson05394f32010-11-08 19:18:58 +00002647 if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
Chris Wilson88241782011-01-07 17:09:48 +00002648 return 0;
Eric Anholte47c68e2008-11-14 13:35:19 -08002649
2650 /* Queue the GPU write cache flushing we need. */
Chris Wilsondb53a302011-02-03 11:57:46 +00002651 return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002652}
2653
2654/** Flushes the GTT write domain for the object if it's dirty. */
2655static void
Chris Wilson05394f32010-11-08 19:18:58 +00002656i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08002657{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002658 uint32_t old_write_domain;
2659
Chris Wilson05394f32010-11-08 19:18:58 +00002660 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
Eric Anholte47c68e2008-11-14 13:35:19 -08002661 return;
2662
Chris Wilson63256ec2011-01-04 18:42:07 +00002663 /* No actual flushing is required for the GTT write domain. Writes
Eric Anholte47c68e2008-11-14 13:35:19 -08002664 * to it immediately go to main memory as far as we know, so there's
2665 * no chipset flush. It also doesn't land in render cache.
Chris Wilson63256ec2011-01-04 18:42:07 +00002666 *
2667 * However, we do have to enforce the order so that all writes through
2668 * the GTT land before any writes to the device, such as updates to
2669 * the GATT itself.
Eric Anholte47c68e2008-11-14 13:35:19 -08002670 */
Chris Wilson63256ec2011-01-04 18:42:07 +00002671 wmb();
2672
Chris Wilson05394f32010-11-08 19:18:58 +00002673 old_write_domain = obj->base.write_domain;
2674 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002675
2676 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00002677 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002678 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002679}
2680
2681/** Flushes the CPU write domain for the object if it's dirty. */
2682static void
Chris Wilson05394f32010-11-08 19:18:58 +00002683i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08002684{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002685 uint32_t old_write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002686
Chris Wilson05394f32010-11-08 19:18:58 +00002687 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
Eric Anholte47c68e2008-11-14 13:35:19 -08002688 return;
2689
2690 i915_gem_clflush_object(obj);
Daniel Vetter40ce6572010-11-05 18:12:18 +01002691 intel_gtt_chipset_flush();
Chris Wilson05394f32010-11-08 19:18:58 +00002692 old_write_domain = obj->base.write_domain;
2693 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002694
2695 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00002696 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002697 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002698}
2699
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002700/**
2701 * Moves a single object to the GTT read, and possibly write domain.
2702 *
2703 * This function returns when the move is complete, including waiting on
2704 * flushes to occur.
2705 */
Jesse Barnes79e53942008-11-07 14:24:08 -08002706int
Chris Wilson20217462010-11-23 15:26:33 +00002707i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002708{
Chris Wilson8325a092012-04-24 15:52:35 +01002709 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002710 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08002711 int ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002712
Eric Anholt02354392008-11-26 13:58:13 -08002713 /* Not valid to be called on unbound objects. */
Chris Wilson05394f32010-11-08 19:18:58 +00002714 if (obj->gtt_space == NULL)
Eric Anholt02354392008-11-26 13:58:13 -08002715 return -EINVAL;
2716
Chris Wilson8d7e3de2011-02-07 15:23:02 +00002717 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
2718 return 0;
2719
Chris Wilson88241782011-01-07 17:09:48 +00002720 ret = i915_gem_object_flush_gpu_write_domain(obj);
2721 if (ret)
2722 return ret;
2723
Chris Wilson87ca9c82010-12-02 09:42:56 +00002724 if (obj->pending_gpu_write || write) {
Chris Wilsonce453d82011-02-21 14:43:56 +00002725 ret = i915_gem_object_wait_rendering(obj);
Chris Wilson87ca9c82010-12-02 09:42:56 +00002726 if (ret)
2727 return ret;
2728 }
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002729
Chris Wilson72133422010-09-13 23:56:38 +01002730 i915_gem_object_flush_cpu_write_domain(obj);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002731
Chris Wilson05394f32010-11-08 19:18:58 +00002732 old_write_domain = obj->base.write_domain;
2733 old_read_domains = obj->base.read_domains;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002734
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002735 /* It should now be out of any other write domains, and we can update
2736 * the domain values for our changes.
2737 */
Chris Wilson05394f32010-11-08 19:18:58 +00002738 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2739 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08002740 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00002741 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
2742 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
2743 obj->dirty = 1;
Eric Anholte47c68e2008-11-14 13:35:19 -08002744 }
2745
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002746 trace_i915_gem_object_change_domain(obj,
2747 old_read_domains,
2748 old_write_domain);
2749
Chris Wilson8325a092012-04-24 15:52:35 +01002750 /* And bump the LRU for this access */
2751 if (i915_gem_object_is_inactive(obj))
2752 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2753
Eric Anholte47c68e2008-11-14 13:35:19 -08002754 return 0;
2755}
2756
Chris Wilsone4ffd172011-04-04 09:44:39 +01002757int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
2758 enum i915_cache_level cache_level)
2759{
Daniel Vetter7bddb012012-02-09 17:15:47 +01002760 struct drm_device *dev = obj->base.dev;
2761 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsone4ffd172011-04-04 09:44:39 +01002762 int ret;
2763
2764 if (obj->cache_level == cache_level)
2765 return 0;
2766
2767 if (obj->pin_count) {
2768 DRM_DEBUG("can not change the cache level of pinned objects\n");
2769 return -EBUSY;
2770 }
2771
2772 if (obj->gtt_space) {
2773 ret = i915_gem_object_finish_gpu(obj);
2774 if (ret)
2775 return ret;
2776
2777 i915_gem_object_finish_gtt(obj);
2778
2779 /* Before SandyBridge, you could not use tiling or fence
2780 * registers with snooped memory, so relinquish any fences
2781 * currently pointing to our region in the aperture.
2782 */
2783 if (INTEL_INFO(obj->base.dev)->gen < 6) {
2784 ret = i915_gem_object_put_fence(obj);
2785 if (ret)
2786 return ret;
2787 }
2788
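 /* Rewrite the PTEs (global GTT and, if present, the aliasing ppgtt
 * mapping) so that they carry the new cache level.
 */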
Daniel Vetter74898d72012-02-15 23:50:22 +01002789 if (obj->has_global_gtt_mapping)
2790 i915_gem_gtt_bind_object(obj, cache_level);
Daniel Vetter7bddb012012-02-09 17:15:47 +01002791 if (obj->has_aliasing_ppgtt_mapping)
2792 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
2793 obj, cache_level);
Chris Wilsone4ffd172011-04-04 09:44:39 +01002794 }
2795
2796 if (cache_level == I915_CACHE_NONE) {
2797 u32 old_read_domains, old_write_domain;
2798
2799 /* If we're coming from LLC cached, then we haven't
2800 * actually been tracking whether the data is in the
2801 * CPU cache or not, since we only allow one bit set
2802 * in obj->write_domain and have been skipping the clflushes.
2803 * Just set it to the CPU cache for now.
2804 */
2805 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
2806 WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
2807
2808 old_read_domains = obj->base.read_domains;
2809 old_write_domain = obj->base.write_domain;
2810
2811 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
2812 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2813
2814 trace_i915_gem_object_change_domain(obj,
2815 old_read_domains,
2816 old_write_domain);
2817 }
2818
2819 obj->cache_level = cache_level;
2820 return 0;
2821}
2822
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002823/*
Chris Wilson2da3b9b2011-04-14 09:41:17 +01002824 * Prepare buffer for display plane (scanout, cursors, etc).
2825 * Can be called from an uninterruptible phase (modesetting) and allows
2826 * any flushes to be pipelined (for pageflips).
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002827 */
2828int
Chris Wilson2da3b9b2011-04-14 09:41:17 +01002829i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
2830 u32 alignment,
Chris Wilson919926a2010-11-12 13:42:53 +00002831 struct intel_ring_buffer *pipelined)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002832{
Chris Wilson2da3b9b2011-04-14 09:41:17 +01002833 u32 old_read_domains, old_write_domain;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002834 int ret;
2835
Chris Wilson88241782011-01-07 17:09:48 +00002836 ret = i915_gem_object_flush_gpu_write_domain(obj);
2837 if (ret)
2838 return ret;
2839
Chris Wilson0be73282010-12-06 14:36:27 +00002840 if (pipelined != obj->ring) {
Ben Widawsky2911a352012-04-05 14:47:36 -07002841 ret = i915_gem_object_sync(obj, pipelined);
2842 if (ret)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002843 return ret;
2844 }
2845
Eric Anholta7ef0642011-03-29 16:59:54 -07002846 /* The display engine is not coherent with the LLC cache on gen6. As
2847 * a result, we make sure that the pinning that is about to occur is
2848 * done with uncached PTEs. This is the lowest common denominator for all
2849 * chipsets.
2850 *
2851 * However for gen6+, we could do better by using the GFDT bit instead
2852 * of uncaching, which would allow us to flush all the LLC-cached data
2853 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
2854 */
2855 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
2856 if (ret)
2857 return ret;
2858
Chris Wilson2da3b9b2011-04-14 09:41:17 +01002859 /* As the user may map the buffer once pinned in the display plane
2860 * (e.g. libkms for the bootup splash), we have to ensure that we
2861 * always use map_and_fenceable for all scanout buffers.
2862 */
2863 ret = i915_gem_object_pin(obj, alignment, true);
2864 if (ret)
2865 return ret;
2866
Chris Wilsonb118c1e2010-05-27 13:18:14 +01002867 i915_gem_object_flush_cpu_write_domain(obj);
2868
Chris Wilson2da3b9b2011-04-14 09:41:17 +01002869 old_write_domain = obj->base.write_domain;
Chris Wilson05394f32010-11-08 19:18:58 +00002870 old_read_domains = obj->base.read_domains;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01002871
2872 /* It should now be out of any other write domains, and we can update
2873 * the domain values for our changes.
2874 */
2875 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
Chris Wilson05394f32010-11-08 19:18:58 +00002876 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002877
2878 trace_i915_gem_object_change_domain(obj,
2879 old_read_domains,
Chris Wilson2da3b9b2011-04-14 09:41:17 +01002880 old_write_domain);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002881
2882 return 0;
2883}
2884
Chris Wilson85345512010-11-13 09:49:11 +00002885int
Chris Wilsona8198ee2011-04-13 22:04:09 +01002886i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
Chris Wilson85345512010-11-13 09:49:11 +00002887{
Chris Wilson88241782011-01-07 17:09:48 +00002888 int ret;
2889
Chris Wilsona8198ee2011-04-13 22:04:09 +01002890 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
Chris Wilson85345512010-11-13 09:49:11 +00002891 return 0;
2892
Chris Wilson88241782011-01-07 17:09:48 +00002893 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
Chris Wilsondb53a302011-02-03 11:57:46 +00002894 ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
Chris Wilson88241782011-01-07 17:09:48 +00002895 if (ret)
2896 return ret;
2897 }
Chris Wilson85345512010-11-13 09:49:11 +00002898
Chris Wilsonc501ae72011-12-14 13:57:23 +01002899 ret = i915_gem_object_wait_rendering(obj);
2900 if (ret)
2901 return ret;
2902
Chris Wilsona8198ee2011-04-13 22:04:09 +01002903 /* Ensure that we invalidate the GPU's caches and TLBs. */
2904 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
Chris Wilsonc501ae72011-12-14 13:57:23 +01002905 return 0;
Chris Wilson85345512010-11-13 09:49:11 +00002906}
2907
Eric Anholte47c68e2008-11-14 13:35:19 -08002908/**
2909 * Moves a single object to the CPU read, and possibly write domain.
2910 *
2911 * This function returns when the move is complete, including waiting on
2912 * flushes to occur.
2913 */
Chris Wilsondabdfe02012-03-26 10:10:27 +02002914int
Chris Wilson919926a2010-11-12 13:42:53 +00002915i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholte47c68e2008-11-14 13:35:19 -08002916{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002917 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08002918 int ret;
2919
Chris Wilson8d7e3de2011-02-07 15:23:02 +00002920 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
2921 return 0;
2922
Chris Wilson88241782011-01-07 17:09:48 +00002923 ret = i915_gem_object_flush_gpu_write_domain(obj);
2924 if (ret)
2925 return ret;
2926
Chris Wilsonf8413192012-04-10 11:52:50 +01002927 if (write || obj->pending_gpu_write) {
2928 ret = i915_gem_object_wait_rendering(obj);
2929 if (ret)
2930 return ret;
2931 }
Eric Anholte47c68e2008-11-14 13:35:19 -08002932
2933 i915_gem_object_flush_gtt_write_domain(obj);
2934
Chris Wilson05394f32010-11-08 19:18:58 +00002935 old_write_domain = obj->base.write_domain;
2936 old_read_domains = obj->base.read_domains;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002937
Eric Anholte47c68e2008-11-14 13:35:19 -08002938 /* Flush the CPU cache if it's still invalid. */
Chris Wilson05394f32010-11-08 19:18:58 +00002939 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
Eric Anholte47c68e2008-11-14 13:35:19 -08002940 i915_gem_clflush_object(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08002941
Chris Wilson05394f32010-11-08 19:18:58 +00002942 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08002943 }
2944
2945 /* It should now be out of any other write domains, and we can update
2946 * the domain values for our changes.
2947 */
Chris Wilson05394f32010-11-08 19:18:58 +00002948 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
Eric Anholte47c68e2008-11-14 13:35:19 -08002949
2950 /* If we're writing through the CPU, then the GPU read domains will
2951 * need to be invalidated at next use.
2952 */
2953 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00002954 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
2955 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08002956 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002957
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002958 trace_i915_gem_object_change_domain(obj,
2959 old_read_domains,
2960 old_write_domain);
2961
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002962 return 0;
2963}
2964
Eric Anholt673a3942008-07-30 12:06:12 -07002965/* Throttle our rendering by waiting until the ring has completed our requests
2966 * emitted over 20 msec ago.
2967 *
Eric Anholtb9624422009-06-03 07:27:35 +00002968 * Note that if we were to use the current jiffies each time around the loop,
2969 * we wouldn't escape the function with any frames outstanding if the time to
2970 * render a frame was over 20ms.
2971 *
Eric Anholt673a3942008-07-30 12:06:12 -07002972 * This should get us reasonable parallelism between CPU and GPU but also
2973 * relatively low latency when blocking on a particular request to finish.
2974 */
2975static int
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002976i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07002977{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002978 struct drm_i915_private *dev_priv = dev->dev_private;
2979 struct drm_i915_file_private *file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00002980 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002981 struct drm_i915_gem_request *request;
2982 struct intel_ring_buffer *ring = NULL;
2983 u32 seqno = 0;
2984 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002985
Chris Wilsone110e8d2011-01-26 15:39:14 +00002986 if (atomic_read(&dev_priv->mm.wedged))
2987 return -EIO;
2988
Chris Wilson1c255952010-09-26 11:03:27 +01002989 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002990 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
Eric Anholtb9624422009-06-03 07:27:35 +00002991 if (time_after_eq(request->emitted_jiffies, recent_enough))
2992 break;
2993
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002994 ring = request->ring;
2995 seqno = request->seqno;
Eric Anholtb9624422009-06-03 07:27:35 +00002996 }
Chris Wilson1c255952010-09-26 11:03:27 +01002997 spin_unlock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002998
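 /*
 * ring/seqno now identify this client's most recent request that is at
 * least 20ms old; anything newer is left outstanding so that roughly a
 * frame's worth of work stays queued.
 */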
2999 if (seqno == 0)
3000 return 0;
3001
3002 ret = 0;
Chris Wilson78501ea2010-10-27 12:18:21 +01003003 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003004 /* And wait for the seqno passing without holding any locks and
3005 * causing extra latency for others. This is safe as the irq
3006 * generation is designed to be run atomically and so is
3007 * lockless.
3008 */
Chris Wilsonb13c2b92010-12-13 16:54:50 +00003009 if (ring->irq_get(ring)) {
3010 ret = wait_event_interruptible(ring->irq_queue,
3011 i915_seqno_passed(ring->get_seqno(ring), seqno)
3012 || atomic_read(&dev_priv->mm.wedged));
3013 ring->irq_put(ring);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003014
Chris Wilsonb13c2b92010-12-13 16:54:50 +00003015 if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
3016 ret = -EIO;
Eric Anholte959b5d2011-12-22 14:55:01 -08003017 } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
3018 seqno) ||
Eric Anholt7ea29b12011-12-22 14:54:59 -08003019 atomic_read(&dev_priv->mm.wedged), 3000)) {
3020 ret = -EBUSY;
Chris Wilsonb13c2b92010-12-13 16:54:50 +00003021 }
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003022 }
3023
3024 if (ret == 0)
3025 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
Eric Anholtb9624422009-06-03 07:27:35 +00003026
Eric Anholt673a3942008-07-30 12:06:12 -07003027 return ret;
3028}
3029
Eric Anholt673a3942008-07-30 12:06:12 -07003030int
Chris Wilson05394f32010-11-08 19:18:58 +00003031i915_gem_object_pin(struct drm_i915_gem_object *obj,
3032 uint32_t alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01003033 bool map_and_fenceable)
Eric Anholt673a3942008-07-30 12:06:12 -07003034{
Eric Anholt673a3942008-07-30 12:06:12 -07003035 int ret;
3036
Chris Wilson05394f32010-11-08 19:18:58 +00003037 BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
Chris Wilsonac0c6b52010-05-27 13:18:18 +01003038
Chris Wilson05394f32010-11-08 19:18:58 +00003039 if (obj->gtt_space != NULL) {
3040 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
3041 (map_and_fenceable && !obj->map_and_fenceable)) {
3042 WARN(obj->pin_count,
Chris Wilsonae7d49d2010-08-04 12:37:41 +01003043 "bo is already pinned with incorrect alignment:"
Daniel Vetter75e9e912010-11-04 17:11:09 +01003044 " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
3045 " obj->map_and_fenceable=%d\n",
Chris Wilson05394f32010-11-08 19:18:58 +00003046 obj->gtt_offset, alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01003047 map_and_fenceable,
Chris Wilson05394f32010-11-08 19:18:58 +00003048 obj->map_and_fenceable);
Chris Wilsonac0c6b52010-05-27 13:18:18 +01003049 ret = i915_gem_object_unbind(obj);
3050 if (ret)
3051 return ret;
3052 }
3053 }
3054
Chris Wilson05394f32010-11-08 19:18:58 +00003055 if (obj->gtt_space == NULL) {
Chris Wilsona00b10c2010-09-24 21:15:47 +01003056 ret = i915_gem_object_bind_to_gtt(obj, alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01003057 map_and_fenceable);
Chris Wilson97311292009-09-21 00:22:34 +01003058 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07003059 return ret;
Chris Wilson22c344e2009-02-11 14:26:45 +00003060 }
Jesse Barnes76446ca2009-12-17 22:05:42 -05003061
Daniel Vetter74898d72012-02-15 23:50:22 +01003062 if (!obj->has_global_gtt_mapping && map_and_fenceable)
3063 i915_gem_gtt_bind_object(obj, obj->cache_level);
3064
Chris Wilson1b502472012-04-24 15:47:30 +01003065 obj->pin_count++;
Chris Wilson6299f992010-11-24 12:23:44 +00003066 obj->pin_mappable |= map_and_fenceable;
Eric Anholt673a3942008-07-30 12:06:12 -07003067
3068 return 0;
3069}
3070
3071void
Chris Wilson05394f32010-11-08 19:18:58 +00003072i915_gem_object_unpin(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07003073{
Chris Wilson05394f32010-11-08 19:18:58 +00003074 BUG_ON(obj->pin_count == 0);
3075 BUG_ON(obj->gtt_space == NULL);
Eric Anholt673a3942008-07-30 12:06:12 -07003076
Chris Wilson1b502472012-04-24 15:47:30 +01003077 if (--obj->pin_count == 0)
Chris Wilson6299f992010-11-24 12:23:44 +00003078 obj->pin_mappable = false;
Eric Anholt673a3942008-07-30 12:06:12 -07003079}
3080
3081int
3082i915_gem_pin_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003083 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003084{
3085 struct drm_i915_gem_pin *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003086 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07003087 int ret;
3088
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003089 ret = i915_mutex_lock_interruptible(dev);
3090 if (ret)
3091 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003092
Chris Wilson05394f32010-11-08 19:18:58 +00003093 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003094 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003095 ret = -ENOENT;
3096 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003097 }
Eric Anholt673a3942008-07-30 12:06:12 -07003098
Chris Wilson05394f32010-11-08 19:18:58 +00003099 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonbb6baf72009-09-22 14:24:13 +01003100 DRM_ERROR("Attempting to pin a purgeable buffer\n");
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003101 ret = -EINVAL;
3102 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003103 }
3104
Chris Wilson05394f32010-11-08 19:18:58 +00003105 if (obj->pin_filp != NULL && obj->pin_filp != file) {
Jesse Barnes79e53942008-11-07 14:24:08 -08003106 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3107 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003108 ret = -EINVAL;
3109 goto out;
Jesse Barnes79e53942008-11-07 14:24:08 -08003110 }
3111
Chris Wilson05394f32010-11-08 19:18:58 +00003112 obj->user_pin_count++;
3113 obj->pin_filp = file;
3114 if (obj->user_pin_count == 1) {
Daniel Vetter75e9e912010-11-04 17:11:09 +01003115 ret = i915_gem_object_pin(obj, args->alignment, true);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003116 if (ret)
3117 goto out;
Eric Anholt673a3942008-07-30 12:06:12 -07003118 }
3119
3120 /* XXX - flush the CPU caches for pinned objects
3121 * as the X server doesn't manage domains yet
3122 */
Eric Anholte47c68e2008-11-14 13:35:19 -08003123 i915_gem_object_flush_cpu_write_domain(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00003124 args->offset = obj->gtt_offset;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003125out:
Chris Wilson05394f32010-11-08 19:18:58 +00003126 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003127unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003128 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003129 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003130}
3131
3132int
3133i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003134 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003135{
3136 struct drm_i915_gem_pin *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003137 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01003138 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003139
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003140 ret = i915_mutex_lock_interruptible(dev);
3141 if (ret)
3142 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003143
Chris Wilson05394f32010-11-08 19:18:58 +00003144 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003145 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003146 ret = -ENOENT;
3147 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003148 }
Chris Wilson76c1dec2010-09-25 11:22:51 +01003149
Chris Wilson05394f32010-11-08 19:18:58 +00003150 if (obj->pin_filp != file) {
Jesse Barnes79e53942008-11-07 14:24:08 -08003151 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
3152 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003153 ret = -EINVAL;
3154 goto out;
Jesse Barnes79e53942008-11-07 14:24:08 -08003155 }
Chris Wilson05394f32010-11-08 19:18:58 +00003156 obj->user_pin_count--;
3157 if (obj->user_pin_count == 0) {
3158 obj->pin_filp = NULL;
Jesse Barnes79e53942008-11-07 14:24:08 -08003159 i915_gem_object_unpin(obj);
3160 }
Eric Anholt673a3942008-07-30 12:06:12 -07003161
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003162out:
Chris Wilson05394f32010-11-08 19:18:58 +00003163 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003164unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003165 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003166 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003167}
3168
3169int
3170i915_gem_busy_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003171 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003172{
3173 struct drm_i915_gem_busy *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003174 struct drm_i915_gem_object *obj;
Chris Wilson30dbf0c2010-09-25 10:19:17 +01003175 int ret;
3176
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003177 ret = i915_mutex_lock_interruptible(dev);
3178 if (ret)
3179 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003180
Chris Wilson05394f32010-11-08 19:18:58 +00003181 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003182 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003183 ret = -ENOENT;
3184 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003185 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08003186
Chris Wilson0be555b2010-08-04 15:36:30 +01003187 /* Count all active objects as busy, even if they are currently not used
3188 * by the gpu. Users of this interface expect objects to eventually
3189 * become non-busy without any further actions, therefore emit any
3190 * necessary flushes here.
Eric Anholtc4de0a52008-12-14 19:05:04 -08003191 */
Chris Wilson05394f32010-11-08 19:18:58 +00003192 args->busy = obj->active;
Chris Wilson0be555b2010-08-04 15:36:30 +01003193 if (args->busy) {
3194 /* Unconditionally flush objects, even when the gpu still uses this
3195 * object. Userspace calling this function indicates that it wants to
3196 * use this buffer sooner rather than later, so issuing the required
3197 * flush earlier is beneficial.
3198 */
Chris Wilson1a1c6972010-12-07 23:00:20 +00003199 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
Chris Wilsondb53a302011-02-03 11:57:46 +00003200 ret = i915_gem_flush_ring(obj->ring,
Chris Wilson88241782011-01-07 17:09:48 +00003201 0, obj->base.write_domain);
Chris Wilson1a1c6972010-12-07 23:00:20 +00003202 } else if (obj->ring->outstanding_lazy_request ==
3203 obj->last_rendering_seqno) {
3204 struct drm_i915_gem_request *request;
3205
Chris Wilson7a194872010-12-07 10:38:40 +00003206 /* This ring is not being cleared by active usage,
3207 * so emit a request to do so.
3208 */
Chris Wilson1a1c6972010-12-07 23:00:20 +00003209 request = kzalloc(sizeof(*request), GFP_KERNEL);
Rakib Mullick457eafc2011-11-16 00:49:28 +06003210 if (request) {
Akshay Joshi0206e352011-08-16 15:34:10 -04003211 ret = i915_add_request(obj->ring, NULL, request);
Rakib Mullick457eafc2011-11-16 00:49:28 +06003212 if (ret)
3213 kfree(request);
3214 } else
Chris Wilson7a194872010-12-07 10:38:40 +00003215 ret = -ENOMEM;
3216 }
Chris Wilson0be555b2010-08-04 15:36:30 +01003217
3218 /* Update the active list for the hardware's current position.
3219 * Otherwise this only updates on a delayed timer or when irqs
3220 * are actually unmasked, and our working set ends up being
3221 * larger than required.
3222 */
Chris Wilsondb53a302011-02-03 11:57:46 +00003223 i915_gem_retire_requests_ring(obj->ring);
Chris Wilson0be555b2010-08-04 15:36:30 +01003224
Chris Wilson05394f32010-11-08 19:18:58 +00003225 args->busy = obj->active;
Chris Wilson0be555b2010-08-04 15:36:30 +01003226 }
Eric Anholt673a3942008-07-30 12:06:12 -07003227
Chris Wilson05394f32010-11-08 19:18:58 +00003228 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003229unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003230 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003231 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003232}
3233
3234int
3235i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3236 struct drm_file *file_priv)
3237{
Akshay Joshi0206e352011-08-16 15:34:10 -04003238 return i915_gem_ring_throttle(dev, file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07003239}
3240
Chris Wilson3ef94da2009-09-14 16:50:29 +01003241int
3242i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3243 struct drm_file *file_priv)
3244{
3245 struct drm_i915_gem_madvise *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003246 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01003247 int ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003248
3249 switch (args->madv) {
3250 case I915_MADV_DONTNEED:
3251 case I915_MADV_WILLNEED:
3252 break;
3253 default:
3254 return -EINVAL;
3255 }
3256
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003257 ret = i915_mutex_lock_interruptible(dev);
3258 if (ret)
3259 return ret;
3260
Chris Wilson05394f32010-11-08 19:18:58 +00003261 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003262 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003263 ret = -ENOENT;
3264 goto unlock;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003265 }
Chris Wilson3ef94da2009-09-14 16:50:29 +01003266
Chris Wilson05394f32010-11-08 19:18:58 +00003267 if (obj->pin_count) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003268 ret = -EINVAL;
3269 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003270 }
3271
Chris Wilson05394f32010-11-08 19:18:58 +00003272 if (obj->madv != __I915_MADV_PURGED)
3273 obj->madv = args->madv;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003274
Chris Wilson2d7ef392009-09-20 23:13:10 +01003275 /* if the object is no longer bound, discard its backing storage */
Chris Wilson05394f32010-11-08 19:18:58 +00003276 if (i915_gem_object_is_purgeable(obj) &&
3277 obj->gtt_space == NULL)
Chris Wilson2d7ef392009-09-20 23:13:10 +01003278 i915_gem_object_truncate(obj);
3279
Chris Wilson05394f32010-11-08 19:18:58 +00003280 args->retained = obj->madv != __I915_MADV_PURGED;
Chris Wilsonbb6baf72009-09-22 14:24:13 +01003281
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003282out:
Chris Wilson05394f32010-11-08 19:18:58 +00003283 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003284unlock:
Chris Wilson3ef94da2009-09-14 16:50:29 +01003285 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003286 return ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003287}
3288
Chris Wilson05394f32010-11-08 19:18:58 +00003289struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3290 size_t size)
Daniel Vetterac52bc52010-04-09 19:05:06 +00003291{
Chris Wilson73aa8082010-09-30 11:46:12 +01003292 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetterc397b902010-04-09 19:05:07 +00003293 struct drm_i915_gem_object *obj;
Hugh Dickins5949eac2011-06-27 16:18:18 -07003294 struct address_space *mapping;
Daniel Vetterc397b902010-04-09 19:05:07 +00003295
3296 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
3297 if (obj == NULL)
3298 return NULL;
3299
3300 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
3301 kfree(obj);
3302 return NULL;
3303 }
3304
Hugh Dickins5949eac2011-06-27 16:18:18 -07003305 mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
3306 mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
3307
Chris Wilson73aa8082010-09-30 11:46:12 +01003308 i915_gem_info_add_obj(dev_priv, size);
3309
Daniel Vetterc397b902010-04-09 19:05:07 +00003310 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3311 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3312
Eugeni Dodonov3d29b842012-01-17 14:43:53 -02003313 if (HAS_LLC(dev)) {
3314 /* On some devices, we can have the GPU use the LLC (the CPU
Eric Anholta1871112011-03-29 16:59:55 -07003315 * cache) for about a 10% performance improvement
3316 * compared to uncached. Graphics requests other than
3317 * display scanout are coherent with the CPU in
3318 * accessing this cache. This means in this mode we
3319 * don't need to clflush on the CPU side, and on the
3320 * GPU side we only need to flush internal caches to
3321 * get data visible to the CPU.
3322 *
3323 * However, we maintain the display planes as UC, and so
3324 * need to rebind when first used as such.
3325 */
3326 obj->cache_level = I915_CACHE_LLC;
3327 } else
3328 obj->cache_level = I915_CACHE_NONE;
3329
Daniel Vetter62b8b212010-04-09 19:05:08 +00003330 obj->base.driver_private = NULL;
Daniel Vetterc397b902010-04-09 19:05:07 +00003331 obj->fence_reg = I915_FENCE_REG_NONE;
Chris Wilson69dc4982010-10-19 10:36:51 +01003332 INIT_LIST_HEAD(&obj->mm_list);
Daniel Vetter93a37f22010-11-05 20:24:53 +01003333 INIT_LIST_HEAD(&obj->gtt_list);
Chris Wilson69dc4982010-10-19 10:36:51 +01003334 INIT_LIST_HEAD(&obj->ring_list);
Chris Wilson432e58e2010-11-25 19:32:06 +00003335 INIT_LIST_HEAD(&obj->exec_list);
Daniel Vetterc397b902010-04-09 19:05:07 +00003336 INIT_LIST_HEAD(&obj->gpu_write_list);
Daniel Vetterc397b902010-04-09 19:05:07 +00003337 obj->madv = I915_MADV_WILLNEED;
Daniel Vetter75e9e912010-11-04 17:11:09 +01003338 /* Avoid an unnecessary call to unbind on the first bind. */
3339 obj->map_and_fenceable = true;
Daniel Vetterc397b902010-04-09 19:05:07 +00003340
Chris Wilson05394f32010-11-08 19:18:58 +00003341 return obj;
Daniel Vetterac52bc52010-04-09 19:05:06 +00003342}
3343
Eric Anholt673a3942008-07-30 12:06:12 -07003344int i915_gem_init_object(struct drm_gem_object *obj)
3345{
Daniel Vetterc397b902010-04-09 19:05:07 +00003346 BUG();
Jesse Barnesde151cf2008-11-12 10:03:55 -08003347
Eric Anholt673a3942008-07-30 12:06:12 -07003348 return 0;
3349}
3350
Chris Wilson1488fc02012-04-24 15:47:31 +01003351void i915_gem_free_object(struct drm_gem_object *gem_obj)
Chris Wilsonbe726152010-07-23 23:18:50 +01003352{
Chris Wilson1488fc02012-04-24 15:47:31 +01003353 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
Chris Wilson05394f32010-11-08 19:18:58 +00003354 struct drm_device *dev = obj->base.dev;
Chris Wilsonbe726152010-07-23 23:18:50 +01003355 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonbe726152010-07-23 23:18:50 +01003356
Chris Wilson26e12f892011-03-20 11:20:19 +00003357 trace_i915_gem_object_destroy(obj);
3358
Chris Wilson1488fc02012-04-24 15:47:31 +01003359 if (obj->phys_obj)
3360 i915_gem_detach_phys_object(dev, obj);
3361
3362 obj->pin_count = 0;
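 /* Unbinding here must not be interrupted by a signal; if it would be
 * (-ERESTARTSYS), warn and retry with mm.interruptible disabled.
 */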
3363 if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
3364 bool was_interruptible;
3365
3366 was_interruptible = dev_priv->mm.interruptible;
3367 dev_priv->mm.interruptible = false;
3368
3369 WARN_ON(i915_gem_object_unbind(obj));
3370
3371 dev_priv->mm.interruptible = was_interruptible;
3372 }
3373
Chris Wilson05394f32010-11-08 19:18:58 +00003374 if (obj->base.map_list.map)
Rob Clarkb464e9a2011-08-10 08:09:08 -05003375 drm_gem_free_mmap_offset(&obj->base);
Chris Wilsonbe726152010-07-23 23:18:50 +01003376
Chris Wilson05394f32010-11-08 19:18:58 +00003377 drm_gem_object_release(&obj->base);
3378 i915_gem_info_remove_obj(dev_priv, obj->base.size);
Chris Wilsonbe726152010-07-23 23:18:50 +01003379
Chris Wilson05394f32010-11-08 19:18:58 +00003380 kfree(obj->bit_17);
3381 kfree(obj);
Chris Wilsonbe726152010-07-23 23:18:50 +01003382}
3383
Jesse Barnes5669fca2009-02-17 15:13:31 -08003384int
Eric Anholt673a3942008-07-30 12:06:12 -07003385i915_gem_idle(struct drm_device *dev)
3386{
3387 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson29105cc2010-01-07 10:39:13 +00003388 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003389
Keith Packard6dbe2772008-10-14 21:41:13 -07003390 mutex_lock(&dev->struct_mutex);
3391
Chris Wilson87acb0a2010-10-19 10:13:00 +01003392 if (dev_priv->mm.suspended) {
Keith Packard6dbe2772008-10-14 21:41:13 -07003393 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07003394 return 0;
Keith Packard6dbe2772008-10-14 21:41:13 -07003395 }
Eric Anholt673a3942008-07-30 12:06:12 -07003396
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08003397 ret = i915_gpu_idle(dev, true);
Keith Packard6dbe2772008-10-14 21:41:13 -07003398 if (ret) {
3399 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07003400 return ret;
Keith Packard6dbe2772008-10-14 21:41:13 -07003401 }
Eric Anholt673a3942008-07-30 12:06:12 -07003402
Chris Wilson29105cc2010-01-07 10:39:13 +00003403 /* Under UMS, be paranoid and evict. */
Chris Wilsona39d7ef2012-04-24 18:22:52 +01003404 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3405 i915_gem_evict_everything(dev, false);
Chris Wilson29105cc2010-01-07 10:39:13 +00003406
Chris Wilson312817a2010-11-22 11:50:11 +00003407 i915_gem_reset_fences(dev);
3408
Chris Wilson29105cc2010-01-07 10:39:13 +00003409 /* Hack! Don't let anybody do execbuf while we don't control the chip.
3410 * We need to replace this with a semaphore, or something.
3411 * And not confound mm.suspended!
3412 */
3413 dev_priv->mm.suspended = 1;
Daniel Vetterbc0c7f12010-08-20 18:18:48 +02003414 del_timer_sync(&dev_priv->hangcheck_timer);
Chris Wilson29105cc2010-01-07 10:39:13 +00003415
3416 i915_kernel_lost_context(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07003417 i915_gem_cleanup_ringbuffer(dev);
Chris Wilson29105cc2010-01-07 10:39:13 +00003418
Keith Packard6dbe2772008-10-14 21:41:13 -07003419 mutex_unlock(&dev->struct_mutex);
3420
Chris Wilson29105cc2010-01-07 10:39:13 +00003421 /* Cancel the retire work handler, which should be idle now. */
3422 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3423
Eric Anholt673a3942008-07-30 12:06:12 -07003424 return 0;
3425}
3426
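/*
 * Program the hardware swizzling controls so that tiled accesses match
 * the detected bit-6 swizzle layout: gen5+ enables display surface
 * swizzling, and gen6/gen7 additionally set TILECTL and the
 * generation-specific ARB_MODE swizzle bit.
 */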
Daniel Vetterf691e2f2012-02-02 09:58:12 +01003427void i915_gem_init_swizzling(struct drm_device *dev)
3428{
3429 drm_i915_private_t *dev_priv = dev->dev_private;
3430
Daniel Vetter11782b02012-01-31 16:47:55 +01003431 if (INTEL_INFO(dev)->gen < 5 ||
Daniel Vetterf691e2f2012-02-02 09:58:12 +01003432 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
3433 return;
3434
3435 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
3436 DISP_TILE_SURFACE_SWIZZLING);
3437
Daniel Vetter11782b02012-01-31 16:47:55 +01003438 if (IS_GEN5(dev))
3439 return;
3440
Daniel Vetterf691e2f2012-02-02 09:58:12 +01003441 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
3442 if (IS_GEN6(dev))
Daniel Vetter6b26c862012-04-24 14:04:12 +02003443 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
Daniel Vetterf691e2f2012-02-02 09:58:12 +01003444 else
Daniel Vetter6b26c862012-04-24 14:04:12 +02003445 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
Daniel Vetterf691e2f2012-02-02 09:58:12 +01003446}
Daniel Vettere21af882012-02-09 20:53:27 +01003447
3448void i915_gem_init_ppgtt(struct drm_device *dev)
3449{
3450 drm_i915_private_t *dev_priv = dev->dev_private;
3451 uint32_t pd_offset;
3452 struct intel_ring_buffer *ring;
Daniel Vetter55a254a2012-03-22 00:14:43 +01003453 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
3454 uint32_t __iomem *pd_addr;
3455 uint32_t pd_entry;
Daniel Vettere21af882012-02-09 20:53:27 +01003456 int i;
3457
3458 if (!dev_priv->mm.aliasing_ppgtt)
3459 return;
3460
Daniel Vetter55a254a2012-03-22 00:14:43 +01003461
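 /*
 * Each page-directory entry carries the (DMA or physical) address of
 * one page table, encoded via GEN6_PDE_ADDR_ENCODE and marked valid,
 * and is written through the GTT mapping at the directory's pd_offset.
 */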
3462 pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
3463 for (i = 0; i < ppgtt->num_pd_entries; i++) {
3464 dma_addr_t pt_addr;
3465
3466 if (dev_priv->mm.gtt->needs_dmar)
3467 pt_addr = ppgtt->pt_dma_addr[i];
3468 else
3469 pt_addr = page_to_phys(ppgtt->pt_pages[i]);
3470
3471 pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
3472 pd_entry |= GEN6_PDE_VALID;
3473
3474 writel(pd_entry, pd_addr + i);
3475 }
3476 readl(pd_addr);
3477
3478 pd_offset = ppgtt->pd_offset;
Daniel Vettere21af882012-02-09 20:53:27 +01003479 pd_offset /= 64; /* in cachelines */
3480 pd_offset <<= 16;
3481
3482 if (INTEL_INFO(dev)->gen == 6) {
Daniel Vetter48ecfa12012-04-11 20:42:40 +02003483 uint32_t ecochk, gab_ctl, ecobits;
3484
3485 ecobits = I915_READ(GAC_ECO_BITS);
3486 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
Daniel Vetterbe901a52012-04-11 20:42:39 +02003487
3488 gab_ctl = I915_READ(GAB_CTL);
3489 I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
3490
3491 ecochk = I915_READ(GAM_ECOCHK);
Daniel Vettere21af882012-02-09 20:53:27 +01003492 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
3493 ECOCHK_PPGTT_CACHE64B);
Daniel Vetter6b26c862012-04-24 14:04:12 +02003494 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
Daniel Vettere21af882012-02-09 20:53:27 +01003495 } else if (INTEL_INFO(dev)->gen >= 7) {
3496 I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
3497 /* GFX_MODE is per-ring on gen7+ */
3498 }
3499
3500 for (i = 0; i < I915_NUM_RINGS; i++) {
3501 ring = &dev_priv->ring[i];
3502
3503 if (INTEL_INFO(dev)->gen >= 7)
3504 I915_WRITE(RING_MODE_GEN7(ring),
Daniel Vetter6b26c862012-04-24 14:04:12 +02003505 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
Daniel Vettere21af882012-02-09 20:53:27 +01003506
3507 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
3508 I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
3509 }
3510}
3511
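/* Bring up the GPU side of GEM: program swizzling, initialise the render,
 * BSD and BLT rings as available, reset the sequence number and enable the
 * aliasing PPGTT.  Called from i915_gem_init() and again whenever the
 * hardware has to be reinitialised.
 */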
int
i915_gem_init_hw(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	i915_gem_init_swizzling(dev);

	ret = intel_init_render_ring_buffer(dev);
	if (ret)
		return ret;

	if (HAS_BSD(dev)) {
		ret = intel_init_bsd_ring_buffer(dev);
		if (ret)
			goto cleanup_render_ring;
	}

	if (HAS_BLT(dev)) {
		ret = intel_init_blt_ring_buffer(dev);
		if (ret)
			goto cleanup_bsd_ring;
	}

	dev_priv->next_seqno = 1;

	i915_gem_init_ppgtt(dev);

	return 0;

cleanup_bsd_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
cleanup_render_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
	return ret;
}

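/* Decide whether to use the aliasing PPGTT: honour the i915_enable_ppgtt
 * module parameter when it is set, otherwise default to enabled except on
 * gen6 when VT-d is active.
 */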
static bool
intel_enable_ppgtt(struct drm_device *dev)
{
	if (i915_enable_ppgtt >= 0)
		return i915_enable_ppgtt;

#ifdef CONFIG_INTEL_IOMMU
	/* Disable ppgtt on SNB if VT-d is on. */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}

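/* Top-level GEM initialisation: carve up the global GTT (shrinking the
 * aperture to make room for the PPGTT page directory when aliasing PPGTT
 * is used) and then initialise the hardware via i915_gem_init_hw().
 */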
int i915_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long gtt_size, mappable_size;
	int ret;

	gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
	mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

	mutex_lock(&dev->struct_mutex);
	if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
		/* PPGTT pdes are stolen from global gtt ptes, so shrink the
		 * aperture accordingly when using aliasing ppgtt. */
		gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;

		i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);

		ret = i915_gem_init_aliasing_ppgtt(dev);
		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	} else {
		/* Let GEM manage all of the aperture.
		 *
		 * However, leave one page at the end still bound to the scratch
		 * page.  There are a number of places where the hardware
		 * apparently prefetches past the end of the object, and we've
		 * seen multiple hangs with the GPU head pointer stuck in a
		 * batchbuffer bound at the last page of the aperture.  One page
		 * should be enough to keep any prefetching inside of the
		 * aperture.
		 */
		i915_gem_init_global_gtt(dev, 0, mappable_size,
					 gtt_size);
	}

	ret = i915_gem_init_hw(dev);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		i915_gem_cleanup_aliasing_ppgtt(dev);
		return ret;
	}

	/* Allow hardware batchbuffers unless told otherwise. */
	dev_priv->allow_batchbuffer = 1;
	return 0;
}

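/* Tear down every ring that was set up by i915_gem_init_hw(). */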
void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < I915_NUM_RINGS; i++)
		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
}

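/* UMS entry point used when userspace takes over the VT: clear any wedged
 * state, reinitialise the hardware and install the interrupt handler.
 * A no-op under KMS, where the kernel owns the hardware for its lifetime.
 */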
int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	if (atomic_read(&dev_priv->mm.wedged)) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
		atomic_set(&dev_priv->mm.wedged, 0);
	}

	mutex_lock(&dev->struct_mutex);
	dev_priv->mm.suspended = 0;

	ret = i915_gem_init_hw(dev);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	BUG_ON(!list_empty(&dev_priv->mm.active_list));
	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
	for (i = 0; i < I915_NUM_RINGS; i++) {
		BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
		BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
	}
	mutex_unlock(&dev->struct_mutex);

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_ringbuffer;

	return 0;

cleanup_ringbuffer:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	dev_priv->mm.suspended = 1;
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	drm_irq_uninstall(dev);
	return i915_gem_idle(dev);
}

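/* Idle the GPU when the last client goes away.  Under KMS the hardware
 * stays up for the lifetime of the driver, so this only matters for UMS.
 */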
void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ret = i915_gem_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
}

static void
init_ring_lists(struct intel_ring_buffer *ring)
{
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);
}

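/* One-time setup at driver load: initialise the GEM and per-ring lists,
 * the retire work handler, fence registers and swizzle detection, and
 * register the inactive-list shrinker with the VM.
 */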
void
i915_gem_load(struct drm_device *dev)
{
	int i;
	drm_i915_private_t *dev_priv = dev->dev_private;

	INIT_LIST_HEAD(&dev_priv->mm.active_list);
	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
	for (i = 0; i < I915_NUM_RINGS; i++)
		init_ring_lists(&dev_priv->ring[i]);
	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	init_completion(&dev_priv->error_completion);

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	if (IS_GEN3(dev)) {
		I915_WRITE(MI_ARB_STATE,
			   _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
	}

	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

	/* Old X drivers will take 0-2 for front, back, depth buffers */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev_priv->fence_reg_start = 3;

	if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	/* Initialize fence registers to zero */
	i915_gem_reset_fences(dev);

	i915_gem_detect_bit_6_swizzle(dev);
	init_waitqueue_head(&dev_priv->pending_flip_queue);

	dev_priv->mm.interruptible = true;

	dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
	dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&dev_priv->mm.inactive_shrinker);
}

/*
 * Create a physically contiguous memory object for this object
 * e.g. for cursor + overlay regs
 */
static int i915_gem_init_phys_object(struct drm_device *dev,
				     int id, int size, int align)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;
	int ret;

	if (dev_priv->mm.phys_objs[id - 1] || !size)
		return 0;

	phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
	if (!phys_obj)
		return -ENOMEM;

	phys_obj->id = id;

	phys_obj->handle = drm_pci_alloc(dev, size, align);
	if (!phys_obj->handle) {
		ret = -ENOMEM;
		goto kfree_obj;
	}
#ifdef CONFIG_X86
	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif

	dev_priv->mm.phys_objs[id - 1] = phys_obj;

	return 0;
kfree_obj:
	kfree(phys_obj);
	return ret;
}

static void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;

	if (!dev_priv->mm.phys_objs[id - 1])
		return;

	phys_obj = dev_priv->mm.phys_objs[id - 1];
	if (phys_obj->cur_obj) {
		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
	}

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif
	drm_pci_free(dev, phys_obj->handle);
	kfree(phys_obj);
	dev_priv->mm.phys_objs[id - 1] = NULL;
}

void i915_gem_free_all_phys_object(struct drm_device *dev)
{
	int i;

	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
		i915_gem_free_phys_object(dev, i);
}

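/* Copy the contents of a physically contiguous (phys) object back into its
 * shmem backing pages and release the phys binding.
 */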
void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	char *vaddr;
	int i;
	int page_count;

	if (!obj->phys_obj)
		return;
	vaddr = obj->phys_obj->handle->vaddr;

	page_count = obj->base.size / PAGE_SIZE;
	for (i = 0; i < page_count; i++) {
		struct page *page = shmem_read_mapping_page(mapping, i);
		if (!IS_ERR(page)) {
			char *dst = kmap_atomic(page);
			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
			kunmap_atomic(dst);

			drm_clflush_pages(&page, 1);

			set_page_dirty(page);
			mark_page_accessed(page);
			page_cache_release(page);
		}
	}
	intel_gtt_chipset_flush();

	obj->phys_obj->cur_obj = NULL;
	obj->phys_obj = NULL;
}

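/* Bind an object to a physically contiguous memory slot, allocating the
 * backing phys object on first use, and copy the current shmem contents
 * into it.  Used for objects such as cursor and overlay registers.
 */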
int
i915_gem_attach_phys_object(struct drm_device *dev,
			    struct drm_i915_gem_object *obj,
			    int id,
			    int align)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = 0;
	int page_count;
	int i;

	if (id > I915_MAX_PHYS_OBJECT)
		return -EINVAL;

	if (obj->phys_obj) {
		if (obj->phys_obj->id == id)
			return 0;
		i915_gem_detach_phys_object(dev, obj);
	}

	/* create a new object */
	if (!dev_priv->mm.phys_objs[id - 1]) {
		ret = i915_gem_init_phys_object(dev, id,
						obj->base.size, align);
		if (ret) {
			DRM_ERROR("failed to init phys object %d size: %zu\n",
				  id, obj->base.size);
			return ret;
		}
	}

	/* bind to the object */
	obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
	obj->phys_obj->cur_obj = obj;

	page_count = obj->base.size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		struct page *page;
		char *dst, *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			return PTR_ERR(page);

		src = kmap_atomic(page);
		dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src);

		mark_page_accessed(page);
		page_cache_release(page);
	}

	return 0;
}

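/* pwrite fast path for phys objects: copy user data straight into the
 * contiguous allocation, falling back to a non-atomic copy (temporarily
 * dropping struct_mutex) if the atomic copy faults.
 */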
static int
i915_gem_phys_pwrite(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
	char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;

	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten)
			return -EFAULT;
	}

	intel_gtt_chipset_flush();
	return 0;
}

void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	while (!list_empty(&file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}

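/* Report whether the GPU still has outstanding work, i.e. whether either
 * the flushing or the active list is non-empty.
 */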
static int
i915_gpu_is_active(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int lists_empty;

	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
		      list_empty(&dev_priv->mm.active_list);

	return !lists_empty;
}

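/* Memory-pressure callback registered with the VM.  With nr_to_scan == 0 it
 * only reports the number of inactive objects; otherwise it unbinds
 * purgeable objects first, then any other inactive objects, and as a last
 * resort waits for the GPU to idle before rescanning.
 */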
static int
i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker,
			     struct drm_i915_private,
			     mm.inactive_shrinker);
	struct drm_device *dev = dev_priv->dev;
	struct drm_i915_gem_object *obj, *next;
	int nr_to_scan = sc->nr_to_scan;
	int cnt;

	if (!mutex_trylock(&dev->struct_mutex))
		return 0;

	/* "fast-path" to count number of available objects */
	if (nr_to_scan == 0) {
		cnt = 0;
		list_for_each_entry(obj,
				    &dev_priv->mm.inactive_list,
				    mm_list)
			cnt++;
		mutex_unlock(&dev->struct_mutex);
		return cnt / 100 * sysctl_vfs_cache_pressure;
	}

rescan:
	/* first scan for clean buffers */
	i915_gem_retire_requests(dev);

	list_for_each_entry_safe(obj, next,
				 &dev_priv->mm.inactive_list,
				 mm_list) {
		if (i915_gem_object_is_purgeable(obj)) {
			if (i915_gem_object_unbind(obj) == 0 &&
			    --nr_to_scan == 0)
				break;
		}
	}

	/* second pass, evict/count anything still on the inactive list */
	cnt = 0;
	list_for_each_entry_safe(obj, next,
				 &dev_priv->mm.inactive_list,
				 mm_list) {
		if (nr_to_scan &&
		    i915_gem_object_unbind(obj) == 0)
			nr_to_scan--;
		else
			cnt++;
	}

	if (nr_to_scan && i915_gpu_is_active(dev)) {
		/*
		 * We are desperate for pages, so as a last resort, wait
		 * for the GPU to finish and discard whatever we can.
		 * This dramatically reduces the number of OOM-killer
		 * events whilst running the GPU aggressively.
		 */
		if (i915_gpu_idle(dev, true) == 0)
			goto rescan;
	}
	mutex_unlock(&dev->struct_mutex);
	return cnt / 100 * sysctl_vfs_cache_pressure;
}