/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
						    unsigned alignment,
						    bool map_and_fenceable);
static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file);

static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
					 struct drm_i915_fence_reg *fence,
					 bool enable);

static int i915_gem_inactive_shrink(struct shrinker *shrinker,
				    struct shrink_control *sc);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

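/* Called when an object loses its fence register: any GTT mmaps of a tiled
 * object are torn down (CPU access must re-fault and re-acquire a fence) and
 * the object's fence bookkeeping is reset.
 */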
static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
	if (obj->tiling_mode)
		i915_gem_release_mmap(obj);

	/* As we do not have an associated fence register, we will force
	 * a tiling change if we ever need to acquire one.
	 */
	obj->fence_dirty = false;
	obj->fence_reg = I915_FENCE_REG_NONE;
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
}

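/* If a GPU reset is pending, wait for it to complete before letting the
 * caller touch the device; bails out with -EIO if the reset takes too long.
 */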
static int
i915_gem_wait_for_error(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct completion *x = &dev_priv->error_completion;
	unsigned long flags;
	int ret;

	if (!atomic_read(&dev_priv->mm.wedged))
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_for_completion_interruptible_timeout(x, 10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	}

	if (atomic_read(&dev_priv->mm.wedged)) {
		/* GPU is hung, bump the completion count to account for
		 * the token we just consumed so that we never hit zero and
		 * end up waiting upon a subsequent completion event that
		 * will never happen.
		 */
		spin_lock_irqsave(&x->wait.lock, flags);
		x->done++;
		spin_unlock_irqrestore(&x->wait.lock, flags);
	}
	return 0;
}

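/* Take struct_mutex for a GEM ioctl, first waiting out any pending GPU reset
 * so we do not start new work on a wedged device.
 */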
int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	int ret;

	ret = i915_gem_wait_for_error(dev);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
	return !obj->active;
}

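/* Legacy ioctl (user mode setting only) that tells GEM which range of the
 * GTT it may manage; rejected under kernel mode setting and on gen5+.
 */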
int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_init *args = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
		return -EINVAL;

	/* GEM with user mode setting was never supported on ilk and later. */
	if (INTEL_INFO(dev)->gen >= 5)
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	i915_gem_init_global_gtt(dev, args->gtt_start,
				 args->gtt_end, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

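/* Report the total GTT aperture size and how much of it is not currently
 * taken up by pinned objects.
 */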
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
		if (obj->pin_count)
			pinned += obj->gtt_space->size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->mm.gtt_total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

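/* Common object-creation helper shared by the create and dumb_create ioctls:
 * allocates a GEM object of the page-rounded size and returns a new handle
 * to it.
 */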
static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base);
		i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
		kfree(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference(&obj->base);
	trace_i915_gem_object_create(obj);

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

int i915_gem_dumb_destroy(struct drm_file *file,
			  struct drm_device *dev,
			  uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

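/* True when CPU access to this object has to compensate for bit-17 swizzling:
 * the object is tiled and the platform swizzles based on physical address
 * bit 17.
 */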
static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj->tiling_mode != I915_TILING_NONE;
}

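/* Copy helpers for bit-17 swizzled objects, used by the pread/pwrite slow
 * paths below: data is copied in runs that never cross a 64-byte cacheline,
 * with bit 6 of the GPU offset flipped (^ 64) to undo the swizzle.
 */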
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret;
}

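/* Clflush a range that may belong to a bit-17 swizzled object; for swizzled
 * objects the range is widened to 128-byte alignment so both swizzled
 * cachelines are flushed.
 */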
static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret;
}

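/* Copy data out of an object's shmem backing pages into user memory, one page
 * at a time: try the atomic fast path while holding struct_mutex, and on a
 * fault drop the lock, prefault the user buffer and retry with the sleeping
 * slow path.
 */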
static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int prefaulted = 0;
	int needs_clflush = 0;
	int release_page;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourself into the gtt
		 * read domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		if (obj->cache_level == I915_CACHE_NONE)
			needs_clflush = 1;
		ret = i915_gem_object_set_to_gtt_domain(obj, false);
		if (ret)
			return ret;
	}

	offset = args->offset;

	while (remain > 0) {
		struct page *page;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		if (obj->pages) {
			page = obj->pages[offset >> PAGE_SHIFT];
			release_page = 0;
		} else {
			page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
				goto out;
			}
			release_page = 1;
		}

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		page_cache_get(page);
		mutex_unlock(&dev->struct_mutex);

		if (!prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);
		page_cache_release(page);
next_page:
		mark_page_accessed(page);
		if (release_page)
			page_cache_release(page);

		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	if (hit_slowpath) {
		/* Fixup: Kill any reinstated backing storage pages */
		if (obj->madv == __I915_MADV_PURGED)
			i915_gem_object_truncate(obj);
	}

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size))
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force*)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length, ret;

	ret = i915_gem_object_pin(obj, 0, true);
	if (ret)
		goto out;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	offset = obj->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
				    page_offset, user_data, page_length)) {
			ret = -EFAULT;
			goto out_unpin;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_unpin:
	i915_gem_object_unpin(obj);
out:
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
						user_data,
						page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret;
}

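/* Write user data into an object's shmem backing pages, one page at a time.
 * Mirrors the pread path: an atomic fast path under struct_mutex with a
 * sleeping slow-path fallback, plus the clflushes needed to keep CPU caches
 * coherent with the GTT.
 */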
static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int needs_clflush_after = 0;
	int needs_clflush_before = 0;
	int release_page;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		/* If we're not in the cpu write domain, set ourself into the gtt
		 * write domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		if (obj->cache_level == I915_CACHE_NONE)
			needs_clflush_after = 1;
		ret = i915_gem_object_set_to_gtt_domain(obj, true);
		if (ret)
			return ret;
	}
	/* Same trick applies for invalidate partially written cachelines before
	 * writing. */
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
	    && obj->cache_level == I915_CACHE_NONE)
		needs_clflush_before = 1;

	offset = args->offset;
	obj->dirty = 1;

	while (remain > 0) {
		struct page *page;
		int partial_cacheline_write;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire page. */
		partial_cacheline_write = needs_clflush_before &&
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));

		if (obj->pages) {
			page = obj->pages[offset >> PAGE_SHIFT];
			release_page = 0;
		} else {
			page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
				goto out;
			}
			release_page = 1;
		}

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		page_cache_get(page);
		mutex_unlock(&dev->struct_mutex);

		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);

		mutex_lock(&dev->struct_mutex);
		page_cache_release(page);
next_page:
		set_page_dirty(page);
		mark_page_accessed(page);
		if (release_page)
			page_cache_release(page);

		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	if (hit_slowpath) {
		/* Fixup: Kill any reinstated backing storage pages */
		if (obj->madv == __I915_MADV_PURGED)
			i915_gem_object_truncate(obj);
		/* and flush dirty cachelines in case the object isn't in the cpu write
		 * domain anymore. */
		if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			i915_gem_clflush_object(obj);
			intel_gtt_chipset_flush();
		}
	}

	if (needs_clflush_after)
		intel_gtt_chipset_flush();

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size))
		return -EFAULT;

	ret = fault_in_multipages_readable((char __user *)(uintptr_t)args->data_ptr,
					   args->size);
	if (ret)
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->phys_obj) {
		ret = i915_gem_phys_pwrite(dev, obj, args, file);
		goto out;
	}

	if (obj->gtt_space &&
	    obj->cache_level == I915_CACHE_NONE &&
	    obj->tiling_mode == I915_TILING_NONE &&
	    obj->map_and_fenceable &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case. */
	}

	if (ret == -EFAULT)
		ret = i915_gem_shmem_pwrite(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain. Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->filp) {
		drm_gem_object_unreference_unlocked(obj);
		return -EINVAL;
	}

	addr = vm_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	drm_gem_object_unreference_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

/**
 * i915_gem_fault - fault a page into the GTT
 * vma: VMA in question
 * vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace. The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room. So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto out;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	/* Now bind it into the GTT if needed */
	if (!obj->map_and_fenceable) {
		ret = i915_gem_object_unbind(obj);
		if (ret)
			goto unlock;
	}
	if (!obj->gtt_space) {
		ret = i915_gem_object_bind_to_gtt(obj, 0, true);
		if (ret)
			goto unlock;

		ret = i915_gem_object_set_to_gtt_domain(obj, write);
		if (ret)
			goto unlock;
	}

	if (!obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(obj, obj->cache_level);

	ret = i915_gem_object_get_fence(obj);
	if (ret)
		goto unlock;

	if (i915_gem_object_is_inactive(obj))
		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

	obj->fault_mappable = true;

	pfn = ((dev_priv->mm.gtt_base_addr + obj->gtt_offset) >> PAGE_SHIFT) +
		page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EIO:
		/* If this -EIO is due to a gpu hang, give the reset code a
		 * chance to clean up the mess. Otherwise return the proper
		 * SIGBUS. */
		if (!atomic_read(&dev_priv->mm.wedged))
			return VM_FAULT_SIGBUS;
	case -EAGAIN:
		/* Give the error handler a chance to run and move the
		 * objects off the GPU active list. Next time we service the
		 * fault, we should be able to transition the page into the
		 * GTT without touching the GPU (and so avoid further
		 * EIO/EAGAIN). If the GPU is wedged, then there is no issue
		 * with coherency, just lost writes.
		 */
		set_need_resched();
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
	if (!obj->fault_mappable)
		return;

	if (obj->base.dev->dev_mapping)
		unmap_mapping_range(obj->base.dev->dev_mapping,
				    (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
				    obj->base.size, 1);

	obj->fault_mappable = false;
}

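/* Size of the GTT range a fenceable object occupies: just the object size on
 * gen4+ or for untiled objects, otherwise the smallest power-of-two fence
 * region (512KiB or 1MiB minimum, depending on the generation) that covers it.
 */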
Chris Wilson92b88ae2010-11-09 11:47:32 +00001197static uint32_t
Chris Wilsone28f8712011-07-18 13:11:49 -07001198i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
Chris Wilson92b88ae2010-11-09 11:47:32 +00001199{
Chris Wilsone28f8712011-07-18 13:11:49 -07001200 uint32_t gtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001201
1202 if (INTEL_INFO(dev)->gen >= 4 ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001203 tiling_mode == I915_TILING_NONE)
1204 return size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001205
1206 /* Previous chips need a power-of-two fence region when tiling */
1207 if (INTEL_INFO(dev)->gen == 3)
Chris Wilsone28f8712011-07-18 13:11:49 -07001208 gtt_size = 1024*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001209 else
Chris Wilsone28f8712011-07-18 13:11:49 -07001210 gtt_size = 512*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001211
Chris Wilsone28f8712011-07-18 13:11:49 -07001212 while (gtt_size < size)
1213 gtt_size <<= 1;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001214
Chris Wilsone28f8712011-07-18 13:11:49 -07001215 return gtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001216}
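
/*
 * Worked example (illustrative only, not part of the driver): a
 * 1300KiB X-tiled object on gen3 starts from the 1MiB minimum fence
 * size and is doubled until the object fits, i.e.
 *
 *	i915_gem_get_gtt_size(dev, 1300 * 1024, I915_TILING_X) == 2 * 1024 * 1024
 *
 * whereas on gen4+ (or for an untiled object) the object size is
 * returned unchanged.
 */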
1217
Jesse Barnesde151cf2008-11-12 10:03:55 -08001218/**
1219 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1220 * @obj: object to check
1221 *
1222 * Return the required GTT alignment for an object, taking into account
Daniel Vetter5e783302010-11-14 22:32:36 +01001223 * potential fence register mapping.
Jesse Barnesde151cf2008-11-12 10:03:55 -08001224 */
1225static uint32_t
Chris Wilsone28f8712011-07-18 13:11:49 -07001226i915_gem_get_gtt_alignment(struct drm_device *dev,
1227 uint32_t size,
1228 int tiling_mode)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001229{
Jesse Barnesde151cf2008-11-12 10:03:55 -08001230 /*
1231 * Minimum alignment is 4k (GTT page size), but might be greater
1232 * if a fence register is needed for the object.
1233 */
Chris Wilsona00b10c2010-09-24 21:15:47 +01001234 if (INTEL_INFO(dev)->gen >= 4 ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001235 tiling_mode == I915_TILING_NONE)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001236 return 4096;
1237
1238 /*
1239 * Previous chips need to be aligned to the size of the smallest
1240 * fence register that can contain the object.
1241 */
Chris Wilsone28f8712011-07-18 13:11:49 -07001242 return i915_gem_get_gtt_size(dev, size, tiling_mode);
Chris Wilsona00b10c2010-09-24 21:15:47 +01001243}
1244
Daniel Vetter5e783302010-11-14 22:32:36 +01001245/**
1246 * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
1247 * unfenced object
Chris Wilsone28f8712011-07-18 13:11:49 -07001248 * @dev: the device
1249 * @size: size of the object
1250 * @tiling_mode: tiling mode of the object
Daniel Vetter5e783302010-11-14 22:32:36 +01001251 *
1252 * Return the required GTT alignment for an object, only taking into account
1253 * unfenced tiled surface requirements.
1254 */
Chris Wilson467cffb2011-03-07 10:42:03 +00001255uint32_t
Chris Wilsone28f8712011-07-18 13:11:49 -07001256i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
1257 uint32_t size,
1258 int tiling_mode)
Daniel Vetter5e783302010-11-14 22:32:36 +01001259{
Daniel Vetter5e783302010-11-14 22:32:36 +01001260 /*
1261 * Minimum alignment is 4k (GTT page size) for sane hw.
1262 */
1263 if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001264 tiling_mode == I915_TILING_NONE)
Daniel Vetter5e783302010-11-14 22:32:36 +01001265 return 4096;
1266
Chris Wilsone28f8712011-07-18 13:11:49 -07001267 /* Previous hardware however needs to be aligned to a power-of-two
1268 * tile height. The simplest method for determining this is to reuse
1269 * the power-of-two fence region size.
Daniel Vetter5e783302010-11-14 22:32:36 +01001270 */
Chris Wilsone28f8712011-07-18 13:11:49 -07001271 return i915_gem_get_gtt_size(dev, size, tiling_mode);
Daniel Vetter5e783302010-11-14 22:32:36 +01001272}
1273
Jesse Barnesde151cf2008-11-12 10:03:55 -08001274int
Dave Airlieff72145b2011-02-07 12:16:14 +10001275i915_gem_mmap_gtt(struct drm_file *file,
1276 struct drm_device *dev,
1277 uint32_t handle,
1278 uint64_t *offset)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001279{
Chris Wilsonda761a62010-10-27 17:37:08 +01001280 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00001281 struct drm_i915_gem_object *obj;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001282 int ret;
1283
Chris Wilson76c1dec2010-09-25 11:22:51 +01001284 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001285 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001286 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001287
Dave Airlieff72145b2011-02-07 12:16:14 +10001288 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00001289 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001290 ret = -ENOENT;
1291 goto unlock;
1292 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08001293
Chris Wilson05394f32010-11-08 19:18:58 +00001294 if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
Chris Wilsonda761a62010-10-27 17:37:08 +01001295 ret = -E2BIG;
Eric Anholtff56b0b2011-10-31 23:16:21 -07001296 goto out;
Chris Wilsonda761a62010-10-27 17:37:08 +01001297 }
1298
Chris Wilson05394f32010-11-08 19:18:58 +00001299 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonab182822009-09-22 18:46:17 +01001300 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001301 ret = -EINVAL;
1302 goto out;
Chris Wilsonab182822009-09-22 18:46:17 +01001303 }
1304
Chris Wilson05394f32010-11-08 19:18:58 +00001305 if (!obj->base.map_list.map) {
Rob Clarkb464e9a2011-08-10 08:09:08 -05001306 ret = drm_gem_create_mmap_offset(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001307 if (ret)
1308 goto out;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001309 }
1310
Dave Airlieff72145b2011-02-07 12:16:14 +10001311 *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001312
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001313out:
Chris Wilson05394f32010-11-08 19:18:58 +00001314 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001315unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001316 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001317 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001318}
1319
Dave Airlieff72145b2011-02-07 12:16:14 +10001320/**
1321 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1322 * @dev: DRM device
1323 * @data: GTT mapping ioctl data
1324 * @file: GEM object info
1325 *
1326 * Simply returns the fake offset to userspace so it can mmap it.
1327 * The mmap call will end up in drm_gem_mmap(), which will set things
1328 * up so we can get faults in the handler above.
1329 *
1330 * The fault handler will take care of binding the object into the GTT
1331 * (since it may have been evicted to make room for something), allocating
1332 * a fence register, and mapping the appropriate aperture address into
1333 * userspace.
1334 */
1335int
1336i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1337 struct drm_file *file)
1338{
1339 struct drm_i915_gem_mmap_gtt *args = data;
1340
Dave Airlieff72145b2011-02-07 12:16:14 +10001341 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1342}
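
/*
 * Illustrative userspace sketch (not part of the driver): the fake
 * offset returned by the ioctl above is only meaningful as the offset
 * argument to mmap() on the DRM fd. Assumes libdrm's drmIoctl(), an
 * open DRM fd and a GEM handle to an object of `size` bytes created
 * by the caller; kept inside #if 0 as it is userspace code.
 */
#if 0
	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
	void *ptr = MAP_FAILED;

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg) == 0)
		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
			   MAP_SHARED, fd, arg.offset);
#endif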
1343
Daniel Vetter1286ff72012-05-10 15:25:09 +02001344int
Chris Wilson05394f32010-11-08 19:18:58 +00001345i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
Chris Wilsone5281cc2010-10-28 13:45:36 +01001346 gfp_t gfpmask)
1347{
Chris Wilsone5281cc2010-10-28 13:45:36 +01001348 int page_count, i;
1349 struct address_space *mapping;
1350 struct inode *inode;
1351 struct page *page;
1352
Daniel Vetter1286ff72012-05-10 15:25:09 +02001353 if (obj->pages || obj->sg_table)
1354 return 0;
1355
Chris Wilsone5281cc2010-10-28 13:45:36 +01001356 /* Get the list of pages out of our struct file. They'll be pinned
1357 * at this point until we release them.
1358 */
Chris Wilson05394f32010-11-08 19:18:58 +00001359 page_count = obj->base.size / PAGE_SIZE;
1360 BUG_ON(obj->pages != NULL);
1361 obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
1362 if (obj->pages == NULL)
Chris Wilsone5281cc2010-10-28 13:45:36 +01001363 return -ENOMEM;
1364
Chris Wilson05394f32010-11-08 19:18:58 +00001365 inode = obj->base.filp->f_path.dentry->d_inode;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001366 mapping = inode->i_mapping;
Hugh Dickins5949eac2011-06-27 16:18:18 -07001367 gfpmask |= mapping_gfp_mask(mapping);
1368
Chris Wilsone5281cc2010-10-28 13:45:36 +01001369 for (i = 0; i < page_count; i++) {
Hugh Dickins5949eac2011-06-27 16:18:18 -07001370 page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
Chris Wilsone5281cc2010-10-28 13:45:36 +01001371 if (IS_ERR(page))
1372 goto err_pages;
1373
Chris Wilson05394f32010-11-08 19:18:58 +00001374 obj->pages[i] = page;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001375 }
1376
Daniel Vetter6dacfd22011-09-12 21:30:02 +02001377 if (i915_gem_object_needs_bit17_swizzle(obj))
Chris Wilsone5281cc2010-10-28 13:45:36 +01001378 i915_gem_object_do_bit_17_swizzle(obj);
1379
1380 return 0;
1381
1382err_pages:
1383 while (i--)
Chris Wilson05394f32010-11-08 19:18:58 +00001384 page_cache_release(obj->pages[i]);
Chris Wilsone5281cc2010-10-28 13:45:36 +01001385
Chris Wilson05394f32010-11-08 19:18:58 +00001386 drm_free_large(obj->pages);
1387 obj->pages = NULL;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001388 return PTR_ERR(page);
1389}
1390
Chris Wilson5cdf5882010-09-27 15:51:07 +01001391static void
Chris Wilson05394f32010-11-08 19:18:58 +00001392i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001393{
Chris Wilson05394f32010-11-08 19:18:58 +00001394 int page_count = obj->base.size / PAGE_SIZE;
Eric Anholt673a3942008-07-30 12:06:12 -07001395 int i;
1396
Daniel Vetter1286ff72012-05-10 15:25:09 +02001397 if (!obj->pages)
1398 return;
1399
Chris Wilson05394f32010-11-08 19:18:58 +00001400 BUG_ON(obj->madv == __I915_MADV_PURGED);
Eric Anholt856fa192009-03-19 14:10:50 -07001401
Daniel Vetter6dacfd22011-09-12 21:30:02 +02001402 if (i915_gem_object_needs_bit17_swizzle(obj))
Eric Anholt280b7132009-03-12 16:56:27 -07001403 i915_gem_object_save_bit_17_swizzle(obj);
1404
Chris Wilson05394f32010-11-08 19:18:58 +00001405 if (obj->madv == I915_MADV_DONTNEED)
1406 obj->dirty = 0;
Chris Wilson3ef94da2009-09-14 16:50:29 +01001407
1408 for (i = 0; i < page_count; i++) {
Chris Wilson05394f32010-11-08 19:18:58 +00001409 if (obj->dirty)
1410 set_page_dirty(obj->pages[i]);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001411
Chris Wilson05394f32010-11-08 19:18:58 +00001412 if (obj->madv == I915_MADV_WILLNEED)
1413 mark_page_accessed(obj->pages[i]);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001414
Chris Wilson05394f32010-11-08 19:18:58 +00001415 page_cache_release(obj->pages[i]);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001416 }
Chris Wilson05394f32010-11-08 19:18:58 +00001417 obj->dirty = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001418
Chris Wilson05394f32010-11-08 19:18:58 +00001419 drm_free_large(obj->pages);
1420 obj->pages = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07001421}
1422
Chris Wilson54cf91d2010-11-25 18:00:26 +00001423void
Chris Wilson05394f32010-11-08 19:18:58 +00001424i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001425 struct intel_ring_buffer *ring,
1426 u32 seqno)
Eric Anholt673a3942008-07-30 12:06:12 -07001427{
Chris Wilson05394f32010-11-08 19:18:58 +00001428 struct drm_device *dev = obj->base.dev;
Chris Wilson69dc4982010-10-19 10:36:51 +01001429 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter617dbe22010-02-11 22:16:02 +01001430
Zou Nan hai852835f2010-05-21 09:08:56 +08001431 BUG_ON(ring == NULL);
Chris Wilson05394f32010-11-08 19:18:58 +00001432 obj->ring = ring;
Eric Anholt673a3942008-07-30 12:06:12 -07001433
1434 /* Add a reference if we're newly entering the active list. */
Chris Wilson05394f32010-11-08 19:18:58 +00001435 if (!obj->active) {
1436 drm_gem_object_reference(&obj->base);
1437 obj->active = 1;
Eric Anholt673a3942008-07-30 12:06:12 -07001438 }
Daniel Vettere35a41d2010-02-11 22:13:59 +01001439
Eric Anholt673a3942008-07-30 12:06:12 -07001440 /* Move from whatever list we were on to the tail of execution. */
Chris Wilson05394f32010-11-08 19:18:58 +00001441 list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1442 list_move_tail(&obj->ring_list, &ring->active_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00001443
Chris Wilson05394f32010-11-08 19:18:58 +00001444 obj->last_rendering_seqno = seqno;
Chris Wilson7dd49062012-03-21 10:48:18 +00001445
Chris Wilsoncaea7472010-11-12 13:53:37 +00001446 if (obj->fenced_gpu_access) {
Chris Wilsoncaea7472010-11-12 13:53:37 +00001447 obj->last_fenced_seqno = seqno;
Chris Wilsoncaea7472010-11-12 13:53:37 +00001448
Chris Wilson7dd49062012-03-21 10:48:18 +00001449 /* Bump MRU to take account of the delayed flush */
1450 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1451 struct drm_i915_fence_reg *reg;
1452
1453 reg = &dev_priv->fence_regs[obj->fence_reg];
1454 list_move_tail(&reg->lru_list,
1455 &dev_priv->mm.fence_list);
1456 }
Chris Wilsoncaea7472010-11-12 13:53:37 +00001457 }
1458}
1459
1460static void
1461i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
1462{
1463 list_del_init(&obj->ring_list);
1464 obj->last_rendering_seqno = 0;
Daniel Vetter15a13bb2012-04-12 01:27:57 +02001465 obj->last_fenced_seqno = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001466}
1467
Eric Anholtce44b0e2008-11-06 16:00:31 -08001468static void
Chris Wilson05394f32010-11-08 19:18:58 +00001469i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
Eric Anholtce44b0e2008-11-06 16:00:31 -08001470{
Chris Wilson05394f32010-11-08 19:18:58 +00001471 struct drm_device *dev = obj->base.dev;
Eric Anholtce44b0e2008-11-06 16:00:31 -08001472 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholtce44b0e2008-11-06 16:00:31 -08001473
Chris Wilson05394f32010-11-08 19:18:58 +00001474 BUG_ON(!obj->active);
1475 list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00001476
1477 i915_gem_object_move_off_active(obj);
1478}
1479
1480static void
1481i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1482{
1483 struct drm_device *dev = obj->base.dev;
1484 struct drm_i915_private *dev_priv = dev->dev_private;
1485
Chris Wilson1b502472012-04-24 15:47:30 +01001486 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00001487
1488 BUG_ON(!list_empty(&obj->gpu_write_list));
1489 BUG_ON(!obj->active);
1490 obj->ring = NULL;
1491
1492 i915_gem_object_move_off_active(obj);
1493 obj->fenced_gpu_access = false;
Chris Wilsoncaea7472010-11-12 13:53:37 +00001494
1495 obj->active = 0;
Chris Wilson87ca9c82010-12-02 09:42:56 +00001496 obj->pending_gpu_write = false;
Chris Wilsoncaea7472010-11-12 13:53:37 +00001497 drm_gem_object_unreference(&obj->base);
1498
1499 WARN_ON(i915_verify_lists(dev));
Eric Anholtce44b0e2008-11-06 16:00:31 -08001500}
Eric Anholt673a3942008-07-30 12:06:12 -07001501
Chris Wilson963b4832009-09-20 23:03:54 +01001502/* Immediately discard the backing storage */
1503static void
Chris Wilson05394f32010-11-08 19:18:58 +00001504i915_gem_object_truncate(struct drm_i915_gem_object *obj)
Chris Wilson963b4832009-09-20 23:03:54 +01001505{
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001506 struct inode *inode;
Chris Wilson963b4832009-09-20 23:03:54 +01001507
Chris Wilsonae9fed62010-08-07 11:01:30 +01001508 /* Our goal here is to return as much of the memory as
1509 * possible back to the system as we are called from OOM.
1510 * To do this we must instruct the shmfs to drop all of its
Hugh Dickinse2377fe2011-06-27 16:18:19 -07001511 * backing pages, *now*.
Chris Wilsonae9fed62010-08-07 11:01:30 +01001512 */
Chris Wilson05394f32010-11-08 19:18:58 +00001513 inode = obj->base.filp->f_path.dentry->d_inode;
Hugh Dickinse2377fe2011-06-27 16:18:19 -07001514 shmem_truncate_range(inode, 0, (loff_t)-1);
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001515
Chris Wilsona14917e2012-02-24 21:13:38 +00001516 if (obj->base.map_list.map)
1517 drm_gem_free_mmap_offset(&obj->base);
1518
Chris Wilson05394f32010-11-08 19:18:58 +00001519 obj->madv = __I915_MADV_PURGED;
Chris Wilson963b4832009-09-20 23:03:54 +01001520}
1521
1522static inline int
Chris Wilson05394f32010-11-08 19:18:58 +00001523i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
Chris Wilson963b4832009-09-20 23:03:54 +01001524{
Chris Wilson05394f32010-11-08 19:18:58 +00001525 return obj->madv == I915_MADV_DONTNEED;
Chris Wilson963b4832009-09-20 23:03:54 +01001526}
1527
Eric Anholt673a3942008-07-30 12:06:12 -07001528static void
Chris Wilsondb53a302011-02-03 11:57:46 +00001529i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
1530 uint32_t flush_domains)
Daniel Vetter63560392010-02-19 11:51:59 +01001531{
Chris Wilson05394f32010-11-08 19:18:58 +00001532 struct drm_i915_gem_object *obj, *next;
Daniel Vetter63560392010-02-19 11:51:59 +01001533
Chris Wilson05394f32010-11-08 19:18:58 +00001534 list_for_each_entry_safe(obj, next,
Chris Wilson64193402010-10-24 12:38:05 +01001535 &ring->gpu_write_list,
Daniel Vetter63560392010-02-19 11:51:59 +01001536 gpu_write_list) {
Chris Wilson05394f32010-11-08 19:18:58 +00001537 if (obj->base.write_domain & flush_domains) {
1538 uint32_t old_write_domain = obj->base.write_domain;
Daniel Vetter63560392010-02-19 11:51:59 +01001539
Chris Wilson05394f32010-11-08 19:18:58 +00001540 obj->base.write_domain = 0;
1541 list_del_init(&obj->gpu_write_list);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001542 i915_gem_object_move_to_active(obj, ring,
Chris Wilsondb53a302011-02-03 11:57:46 +00001543 i915_gem_next_request_seqno(ring));
Daniel Vetter63560392010-02-19 11:51:59 +01001544
Daniel Vetter63560392010-02-19 11:51:59 +01001545 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00001546 obj->base.read_domains,
Daniel Vetter63560392010-02-19 11:51:59 +01001547 old_write_domain);
1548 }
1549 }
1550}
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001551
Daniel Vetter53d227f2012-01-25 16:32:49 +01001552static u32
1553i915_gem_get_seqno(struct drm_device *dev)
1554{
1555 drm_i915_private_t *dev_priv = dev->dev_private;
1556 u32 seqno = dev_priv->next_seqno;
1557
1558 /* reserve 0 for non-seqno */
1559 if (++dev_priv->next_seqno == 0)
1560 dev_priv->next_seqno = 1;
1561
1562 return seqno;
1563}
1564
1565u32
1566i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
1567{
1568 if (ring->outstanding_lazy_request == 0)
1569 ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
1570
1571 return ring->outstanding_lazy_request;
1572}
1573
Chris Wilson3cce4692010-10-27 16:11:02 +01001574int
Chris Wilsondb53a302011-02-03 11:57:46 +00001575i915_add_request(struct intel_ring_buffer *ring,
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001576 struct drm_file *file,
Chris Wilsondb53a302011-02-03 11:57:46 +00001577 struct drm_i915_gem_request *request)
Eric Anholt673a3942008-07-30 12:06:12 -07001578{
Chris Wilsondb53a302011-02-03 11:57:46 +00001579 drm_i915_private_t *dev_priv = ring->dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07001580 uint32_t seqno;
Chris Wilsona71d8d92012-02-15 11:25:36 +00001581 u32 request_ring_position;
Eric Anholt673a3942008-07-30 12:06:12 -07001582 int was_empty;
Chris Wilson3cce4692010-10-27 16:11:02 +01001583 int ret;
1584
Daniel Vettercc889e02012-06-13 20:45:19 +02001585 /*
1586 * Emit any outstanding flushes - execbuf can fail to emit the flush
1587 * after having emitted the batchbuffer command. Hence we need to fix
1588 * things up similar to emitting the lazy request. The difference here
1589 * is that the flush _must_ happen before the next request, no matter
1590 * what.
1591 */
1592 if (ring->gpu_caches_dirty) {
1593 ret = i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS);
1594 if (ret)
1595 return ret;
1596
1597 ring->gpu_caches_dirty = false;
1598 }
1599
Chris Wilson3cce4692010-10-27 16:11:02 +01001600 BUG_ON(request == NULL);
Daniel Vetter53d227f2012-01-25 16:32:49 +01001601 seqno = i915_gem_next_request_seqno(ring);
Eric Anholt673a3942008-07-30 12:06:12 -07001602
Chris Wilsona71d8d92012-02-15 11:25:36 +00001603 /* Record the position of the start of the request so that
1604 * should we detect the updated seqno part-way through the
1605 * GPU processing the request, we never over-estimate the
1606 * position of the head.
1607 */
1608 request_ring_position = intel_ring_get_tail(ring);
1609
Chris Wilson3cce4692010-10-27 16:11:02 +01001610 ret = ring->add_request(ring, &seqno);
1611 if (ret)
1612 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001613
Chris Wilsondb53a302011-02-03 11:57:46 +00001614 trace_i915_gem_request_add(ring, seqno);
Eric Anholt673a3942008-07-30 12:06:12 -07001615
1616 request->seqno = seqno;
Zou Nan hai852835f2010-05-21 09:08:56 +08001617 request->ring = ring;
Chris Wilsona71d8d92012-02-15 11:25:36 +00001618 request->tail = request_ring_position;
Eric Anholt673a3942008-07-30 12:06:12 -07001619 request->emitted_jiffies = jiffies;
Zou Nan hai852835f2010-05-21 09:08:56 +08001620 was_empty = list_empty(&ring->request_list);
1621 list_add_tail(&request->list, &ring->request_list);
1622
Chris Wilsondb53a302011-02-03 11:57:46 +00001623 if (file) {
1624 struct drm_i915_file_private *file_priv = file->driver_priv;
1625
Chris Wilson1c255952010-09-26 11:03:27 +01001626 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001627 request->file_priv = file_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00001628 list_add_tail(&request->client_list,
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001629 &file_priv->mm.request_list);
Chris Wilson1c255952010-09-26 11:03:27 +01001630 spin_unlock(&file_priv->mm.lock);
Eric Anholtb9624422009-06-03 07:27:35 +00001631 }
Eric Anholt673a3942008-07-30 12:06:12 -07001632
Daniel Vetter5391d0c2012-01-25 14:03:57 +01001633 ring->outstanding_lazy_request = 0;
Chris Wilsondb53a302011-02-03 11:57:46 +00001634
Ben Gamarif65d9422009-09-14 17:48:44 -04001635 if (!dev_priv->mm.suspended) {
Ben Widawsky3e0dc6b2011-06-29 10:26:42 -07001636 if (i915_enable_hangcheck) {
1637 mod_timer(&dev_priv->hangcheck_timer,
1638 jiffies +
1639 msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
1640 }
Ben Gamarif65d9422009-09-14 17:48:44 -04001641 if (was_empty)
Chris Wilsonb3b079d2010-09-13 23:44:34 +01001642 queue_delayed_work(dev_priv->wq,
1643 &dev_priv->mm.retire_work, HZ);
Ben Gamarif65d9422009-09-14 17:48:44 -04001644 }
Daniel Vettercc889e02012-06-13 20:45:19 +02001645
1646 WARN_ON(!list_empty(&ring->gpu_write_list));
1647
Chris Wilson3cce4692010-10-27 16:11:02 +01001648 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001649}
1650
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001651static inline void
1652i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
Eric Anholt673a3942008-07-30 12:06:12 -07001653{
Chris Wilson1c255952010-09-26 11:03:27 +01001654 struct drm_i915_file_private *file_priv = request->file_priv;
Eric Anholt673a3942008-07-30 12:06:12 -07001655
Chris Wilson1c255952010-09-26 11:03:27 +01001656 if (!file_priv)
1657 return;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001658
Chris Wilson1c255952010-09-26 11:03:27 +01001659 spin_lock(&file_priv->mm.lock);
Herton Ronaldo Krzesinski09bfa512011-03-17 13:45:12 +00001660 if (request->file_priv) {
1661 list_del(&request->client_list);
1662 request->file_priv = NULL;
1663 }
Chris Wilson1c255952010-09-26 11:03:27 +01001664 spin_unlock(&file_priv->mm.lock);
Eric Anholt673a3942008-07-30 12:06:12 -07001665}
1666
Chris Wilsondfaae392010-09-22 10:31:52 +01001667static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
1668 struct intel_ring_buffer *ring)
Chris Wilson9375e442010-09-19 12:21:28 +01001669{
Chris Wilsondfaae392010-09-22 10:31:52 +01001670 while (!list_empty(&ring->request_list)) {
1671 struct drm_i915_gem_request *request;
Chris Wilson9375e442010-09-19 12:21:28 +01001672
Chris Wilsondfaae392010-09-22 10:31:52 +01001673 request = list_first_entry(&ring->request_list,
1674 struct drm_i915_gem_request,
1675 list);
1676
1677 list_del(&request->list);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001678 i915_gem_request_remove_from_client(request);
Chris Wilsondfaae392010-09-22 10:31:52 +01001679 kfree(request);
1680 }
1681
1682 while (!list_empty(&ring->active_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00001683 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001684
Chris Wilson05394f32010-11-08 19:18:58 +00001685 obj = list_first_entry(&ring->active_list,
1686 struct drm_i915_gem_object,
1687 ring_list);
Eric Anholt673a3942008-07-30 12:06:12 -07001688
Chris Wilson05394f32010-11-08 19:18:58 +00001689 obj->base.write_domain = 0;
1690 list_del_init(&obj->gpu_write_list);
1691 i915_gem_object_move_to_inactive(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001692 }
Eric Anholt673a3942008-07-30 12:06:12 -07001693}
1694
Chris Wilson312817a2010-11-22 11:50:11 +00001695static void i915_gem_reset_fences(struct drm_device *dev)
1696{
1697 struct drm_i915_private *dev_priv = dev->dev_private;
1698 int i;
1699
Daniel Vetter4b9de732011-10-09 21:52:02 +02001700 for (i = 0; i < dev_priv->num_fence_regs; i++) {
Chris Wilson312817a2010-11-22 11:50:11 +00001701 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
Chris Wilson7d2cb392010-11-27 17:38:29 +00001702
Chris Wilsonada726c2012-04-17 15:31:32 +01001703 i915_gem_write_fence(dev, i, NULL);
Chris Wilson7d2cb392010-11-27 17:38:29 +00001704
Chris Wilsonada726c2012-04-17 15:31:32 +01001705 if (reg->obj)
1706 i915_gem_object_fence_lost(reg->obj);
Chris Wilson7d2cb392010-11-27 17:38:29 +00001707
Chris Wilsonada726c2012-04-17 15:31:32 +01001708 reg->pin_count = 0;
1709 reg->obj = NULL;
1710 INIT_LIST_HEAD(&reg->lru_list);
Chris Wilson312817a2010-11-22 11:50:11 +00001711 }
Chris Wilsonada726c2012-04-17 15:31:32 +01001712
1713 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
Chris Wilson312817a2010-11-22 11:50:11 +00001714}
1715
Chris Wilson069efc12010-09-30 16:53:18 +01001716void i915_gem_reset(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07001717{
Chris Wilsondfaae392010-09-22 10:31:52 +01001718 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00001719 struct drm_i915_gem_object *obj;
Chris Wilsonb4519512012-05-11 14:29:30 +01001720 struct intel_ring_buffer *ring;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001721 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07001722
Chris Wilsonb4519512012-05-11 14:29:30 +01001723 for_each_ring(ring, dev_priv, i)
1724 i915_gem_reset_ring_lists(dev_priv, ring);
Chris Wilsondfaae392010-09-22 10:31:52 +01001725
1726 /* Remove anything from the flushing lists. The GPU cache is likely
1727 * to be lost on reset along with the data, so simply move the
1728 * lost bo to the inactive list.
1729 */
1730 while (!list_empty(&dev_priv->mm.flushing_list)) {
Akshay Joshi0206e352011-08-16 15:34:10 -04001731 obj = list_first_entry(&dev_priv->mm.flushing_list,
Chris Wilson05394f32010-11-08 19:18:58 +00001732 struct drm_i915_gem_object,
1733 mm_list);
Chris Wilson9375e442010-09-19 12:21:28 +01001734
Chris Wilson05394f32010-11-08 19:18:58 +00001735 obj->base.write_domain = 0;
1736 list_del_init(&obj->gpu_write_list);
1737 i915_gem_object_move_to_inactive(obj);
Chris Wilson9375e442010-09-19 12:21:28 +01001738 }
Chris Wilson9375e442010-09-19 12:21:28 +01001739
Chris Wilsondfaae392010-09-22 10:31:52 +01001740 /* Move everything out of the GPU domains to ensure we do any
1741 * necessary invalidation upon reuse.
1742 */
Chris Wilson05394f32010-11-08 19:18:58 +00001743 list_for_each_entry(obj,
Chris Wilson77f01232010-09-19 12:31:36 +01001744 &dev_priv->mm.inactive_list,
Chris Wilson69dc4982010-10-19 10:36:51 +01001745 mm_list)
Chris Wilson77f01232010-09-19 12:31:36 +01001746 {
Chris Wilson05394f32010-11-08 19:18:58 +00001747 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
Chris Wilson77f01232010-09-19 12:31:36 +01001748 }
Chris Wilson069efc12010-09-30 16:53:18 +01001749
1750 /* The fence registers are invalidated so clear them out */
Chris Wilson312817a2010-11-22 11:50:11 +00001751 i915_gem_reset_fences(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07001752}
1753
1754/**
1755 * This function clears the request list as sequence numbers are passed.
1756 */
Chris Wilsona71d8d92012-02-15 11:25:36 +00001757void
Chris Wilsondb53a302011-02-03 11:57:46 +00001758i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001759{
Eric Anholt673a3942008-07-30 12:06:12 -07001760 uint32_t seqno;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001761 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07001762
Chris Wilsondb53a302011-02-03 11:57:46 +00001763 if (list_empty(&ring->request_list))
Karsten Wiese6c0594a2009-02-23 15:07:57 +01001764 return;
1765
Chris Wilsondb53a302011-02-03 11:57:46 +00001766 WARN_ON(i915_verify_lists(ring->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07001767
Chris Wilson78501ea2010-10-27 12:18:21 +01001768 seqno = ring->get_seqno(ring);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001769
Chris Wilson076e2c02011-01-21 10:07:18 +00001770 for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001771 if (seqno >= ring->sync_seqno[i])
1772 ring->sync_seqno[i] = 0;
1773
Zou Nan hai852835f2010-05-21 09:08:56 +08001774 while (!list_empty(&ring->request_list)) {
Eric Anholt673a3942008-07-30 12:06:12 -07001775 struct drm_i915_gem_request *request;
Eric Anholt673a3942008-07-30 12:06:12 -07001776
Zou Nan hai852835f2010-05-21 09:08:56 +08001777 request = list_first_entry(&ring->request_list,
Eric Anholt673a3942008-07-30 12:06:12 -07001778 struct drm_i915_gem_request,
1779 list);
Eric Anholt673a3942008-07-30 12:06:12 -07001780
Chris Wilsondfaae392010-09-22 10:31:52 +01001781 if (!i915_seqno_passed(seqno, request->seqno))
Eric Anholt673a3942008-07-30 12:06:12 -07001782 break;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001783
Chris Wilsondb53a302011-02-03 11:57:46 +00001784 trace_i915_gem_request_retire(ring, request->seqno);
Chris Wilsona71d8d92012-02-15 11:25:36 +00001785 /* We know the GPU must have read the request to have
1786 * sent us the seqno + interrupt, so use the position
1787 * of the tail of the request to update the last known position
1788 * of the GPU head.
1789 */
1790 ring->last_retired_head = request->tail;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001791
1792 list_del(&request->list);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001793 i915_gem_request_remove_from_client(request);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001794 kfree(request);
1795 }
1796
1797 /* Move any buffers on the active list that are no longer referenced
1798 * by the ringbuffer to the flushing/inactive lists as appropriate.
1799 */
1800 while (!list_empty(&ring->active_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00001801 struct drm_i915_gem_object *obj;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001802
Akshay Joshi0206e352011-08-16 15:34:10 -04001803 obj = list_first_entry(&ring->active_list,
Chris Wilson05394f32010-11-08 19:18:58 +00001804 struct drm_i915_gem_object,
1805 ring_list);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001806
Chris Wilson05394f32010-11-08 19:18:58 +00001807 if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001808 break;
1809
Chris Wilson05394f32010-11-08 19:18:58 +00001810 if (obj->base.write_domain != 0)
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001811 i915_gem_object_move_to_flushing(obj);
1812 else
1813 i915_gem_object_move_to_inactive(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001814 }
Chris Wilson9d34e5d2009-09-24 05:26:06 +01001815
Chris Wilsondb53a302011-02-03 11:57:46 +00001816 if (unlikely(ring->trace_irq_seqno &&
1817 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001818 ring->irq_put(ring);
Chris Wilsondb53a302011-02-03 11:57:46 +00001819 ring->trace_irq_seqno = 0;
Chris Wilson9d34e5d2009-09-24 05:26:06 +01001820 }
Chris Wilson23bc5982010-09-29 16:10:57 +01001821
Chris Wilsondb53a302011-02-03 11:57:46 +00001822 WARN_ON(i915_verify_lists(ring->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07001823}
1824
1825void
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001826i915_gem_retire_requests(struct drm_device *dev)
1827{
1828 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01001829 struct intel_ring_buffer *ring;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001830 int i;
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001831
Chris Wilsonb4519512012-05-11 14:29:30 +01001832 for_each_ring(ring, dev_priv, i)
1833 i915_gem_retire_requests_ring(ring);
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001834}
1835
Daniel Vetter75ef9da2010-08-21 00:25:16 +02001836static void
Eric Anholt673a3942008-07-30 12:06:12 -07001837i915_gem_retire_work_handler(struct work_struct *work)
1838{
1839 drm_i915_private_t *dev_priv;
1840 struct drm_device *dev;
Chris Wilsonb4519512012-05-11 14:29:30 +01001841 struct intel_ring_buffer *ring;
Chris Wilson0a587052011-01-09 21:05:44 +00001842 bool idle;
1843 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07001844
1845 dev_priv = container_of(work, drm_i915_private_t,
1846 mm.retire_work.work);
1847 dev = dev_priv->dev;
1848
Chris Wilson891b48c2010-09-29 12:26:37 +01001849 /* Come back later if the device is busy... */
1850 if (!mutex_trylock(&dev->struct_mutex)) {
1851 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1852 return;
1853 }
1854
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001855 i915_gem_retire_requests(dev);
Zou Nan haid1b851f2010-05-21 09:08:57 +08001856
Chris Wilson0a587052011-01-09 21:05:44 +00001857 /* Send a periodic flush down the ring so we don't hold onto GEM
1858 * objects indefinitely.
1859 */
1860 idle = true;
Chris Wilsonb4519512012-05-11 14:29:30 +01001861 for_each_ring(ring, dev_priv, i) {
Daniel Vettercc889e02012-06-13 20:45:19 +02001862 if (ring->gpu_caches_dirty) {
Chris Wilson0a587052011-01-09 21:05:44 +00001863 struct drm_i915_gem_request *request;
Chris Wilson0a587052011-01-09 21:05:44 +00001864
Chris Wilson0a587052011-01-09 21:05:44 +00001865 request = kzalloc(sizeof(*request), GFP_KERNEL);
Daniel Vettercc889e02012-06-13 20:45:19 +02001866 if (request == NULL ||
Chris Wilsondb53a302011-02-03 11:57:46 +00001867 i915_add_request(ring, NULL, request))
Chris Wilson0a587052011-01-09 21:05:44 +00001868 kfree(request);
1869 }
1870
1871 idle &= list_empty(&ring->request_list);
1872 }
1873
1874 if (!dev_priv->mm.suspended && !idle)
Eric Anholt9c9fe1f2009-08-03 16:09:16 -07001875 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
Chris Wilson0a587052011-01-09 21:05:44 +00001876
Eric Anholt673a3942008-07-30 12:06:12 -07001877 mutex_unlock(&dev->struct_mutex);
1878}
1879
Daniel Vetterd6b2c792012-07-04 22:54:13 +02001880int
1881i915_gem_check_wedge(struct drm_i915_private *dev_priv,
1882 bool interruptible)
Ben Widawskyb4aca012012-04-25 20:50:12 -07001883{
Ben Widawskyb4aca012012-04-25 20:50:12 -07001884 if (atomic_read(&dev_priv->mm.wedged)) {
1885 struct completion *x = &dev_priv->error_completion;
1886 bool recovery_complete;
1887 unsigned long flags;
1888
1889 /* Give the error handler a chance to run. */
1890 spin_lock_irqsave(&x->wait.lock, flags);
1891 recovery_complete = x->done > 0;
1892 spin_unlock_irqrestore(&x->wait.lock, flags);
1893
Daniel Vetterd6b2c792012-07-04 22:54:13 +02001894 /* Non-interruptible callers can't handle -EAGAIN, hence return
1895 * -EIO unconditionally for these. */
1896 if (!interruptible)
1897 return -EIO;
1898
1899 /* Recovery complete, but still wedged means reset failure. */
1900 if (recovery_complete)
1901 return -EIO;
1902
1903 return -EAGAIN;
Ben Widawskyb4aca012012-04-25 20:50:12 -07001904 }
1905
1906 return 0;
1907}
1908
1909/*
1910 * Compare seqno against outstanding lazy request. Emit a request if they are
1911 * equal.
1912 */
1913static int
1914i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
1915{
1916 int ret = 0;
1917
1918 BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
1919
1920 if (seqno == ring->outstanding_lazy_request) {
1921 struct drm_i915_gem_request *request;
1922
1923 request = kzalloc(sizeof(*request), GFP_KERNEL);
1924 if (request == NULL)
1925 return -ENOMEM;
1926
1927 ret = i915_add_request(ring, NULL, request);
1928 if (ret) {
1929 kfree(request);
1930 return ret;
1931 }
1932
1933 BUG_ON(seqno != request->seqno);
1934 }
1935
1936 return ret;
1937}
1938
Ben Widawsky5c81fe852012-05-24 15:03:08 -07001939/**
1940 * __wait_seqno - wait until execution of seqno has finished
1941 * @ring: the ring expected to report seqno
1942 * @seqno: duh!
1943 * @interruptible: do an interruptible wait (normally yes)
1944 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
1945 *
1946 * Returns 0 if the seqno was found within the allotted time. Else returns the
1947 * errno with remaining time filled in timeout argument.
1948 */
Ben Widawsky604dd3e2012-04-26 16:03:03 -07001949static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
Ben Widawsky5c81fe852012-05-24 15:03:08 -07001950 bool interruptible, struct timespec *timeout)
Ben Widawsky604dd3e2012-04-26 16:03:03 -07001951{
1952 drm_i915_private_t *dev_priv = ring->dev->dev_private;
Ben Widawsky5c81fe852012-05-24 15:03:08 -07001953 struct timespec before, now, wait_time={1,0};
1954 unsigned long timeout_jiffies;
1955 long end;
1956 bool wait_forever = true;
Daniel Vetterd6b2c792012-07-04 22:54:13 +02001957 int ret;
Ben Widawsky604dd3e2012-04-26 16:03:03 -07001958
1959 if (i915_seqno_passed(ring->get_seqno(ring), seqno))
1960 return 0;
1961
1962 trace_i915_gem_request_wait_begin(ring, seqno);
Ben Widawsky5c81fe852012-05-24 15:03:08 -07001963
1964 if (timeout != NULL) {
1965 wait_time = *timeout;
1966 wait_forever = false;
1967 }
1968
1969 timeout_jiffies = timespec_to_jiffies(&wait_time);
1970
Ben Widawsky604dd3e2012-04-26 16:03:03 -07001971 if (WARN_ON(!ring->irq_get(ring)))
1972 return -ENODEV;
1973
Ben Widawsky5c81fe852012-05-24 15:03:08 -07001974 /* Record current time in case interrupted by signal, or wedged */
1975 getrawmonotonic(&before);
1976
Ben Widawsky604dd3e2012-04-26 16:03:03 -07001977#define EXIT_COND \
1978 (i915_seqno_passed(ring->get_seqno(ring), seqno) || \
1979 atomic_read(&dev_priv->mm.wedged))
Ben Widawsky5c81fe852012-05-24 15:03:08 -07001980 do {
1981 if (interruptible)
1982 end = wait_event_interruptible_timeout(ring->irq_queue,
1983 EXIT_COND,
1984 timeout_jiffies);
1985 else
1986 end = wait_event_timeout(ring->irq_queue, EXIT_COND,
1987 timeout_jiffies);
Ben Widawsky604dd3e2012-04-26 16:03:03 -07001988
Daniel Vetterd6b2c792012-07-04 22:54:13 +02001989 ret = i915_gem_check_wedge(dev_priv, interruptible);
1990 if (ret)
1991 end = ret;
Ben Widawsky5c81fe852012-05-24 15:03:08 -07001992 } while (end == 0 && wait_forever);
1993
1994 getrawmonotonic(&now);
Ben Widawsky604dd3e2012-04-26 16:03:03 -07001995
1996 ring->irq_put(ring);
1997 trace_i915_gem_request_wait_end(ring, seqno);
1998#undef EXIT_COND
1999
Ben Widawsky5c81fe852012-05-24 15:03:08 -07002000 if (timeout) {
2001 struct timespec sleep_time = timespec_sub(now, before);
2002 *timeout = timespec_sub(*timeout, sleep_time);
2003 }
2004
2005 switch (end) {
Chris Wilsoneeef9b32012-07-16 13:05:34 +01002006 case -EIO:
Ben Widawsky5c81fe852012-05-24 15:03:08 -07002007 case -EAGAIN: /* Wedged */
2008 case -ERESTARTSYS: /* Signal */
2009 return (int)end;
2010 case 0: /* Timeout */
2011 if (timeout)
2012 set_normalized_timespec(timeout, 0, 0);
2013 return -ETIME;
2014 default: /* Completed */
2015 WARN_ON(end < 0); /* We're not aware of other errors */
2016 return 0;
2017 }
Ben Widawsky604dd3e2012-04-26 16:03:03 -07002018}
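
/*
 * Worked example (illustrative only): a caller passing a 500ms
 * timeout that is interrupted by a signal after roughly 200ms gets
 * -ERESTARTSYS back with *timeout updated to the ~300ms that were
 * left, so a restarted wait does not start over from scratch.
 */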
2019
Chris Wilsondb53a302011-02-03 11:57:46 +00002020/**
2021 * Waits for a sequence number to be signaled, and cleans up the
2022 * request and object lists appropriately for that event.
2023 */
Daniel Vetter5a5a0c62009-09-15 22:57:36 +02002024int
Ben Widawsky199b2bc2012-05-24 15:03:11 -07002025i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
Eric Anholt673a3942008-07-30 12:06:12 -07002026{
Chris Wilsondb53a302011-02-03 11:57:46 +00002027 drm_i915_private_t *dev_priv = ring->dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07002028 int ret = 0;
2029
2030 BUG_ON(seqno == 0);
2031
Daniel Vetterd6b2c792012-07-04 22:54:13 +02002032 ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
Ben Widawskyb4aca012012-04-25 20:50:12 -07002033 if (ret)
2034 return ret;
Chris Wilsond9bc7e92011-02-07 13:09:31 +00002035
Ben Widawskyb4aca012012-04-25 20:50:12 -07002036 ret = i915_gem_check_olr(ring, seqno);
2037 if (ret)
2038 return ret;
Daniel Vettere35a41d2010-02-11 22:13:59 +01002039
Ben Widawsky5c81fe852012-05-24 15:03:08 -07002040 ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible, NULL);
Eric Anholt673a3942008-07-30 12:06:12 -07002041
Eric Anholt673a3942008-07-30 12:06:12 -07002042 return ret;
2043}
2044
Daniel Vetter48764bf2009-09-15 22:57:32 +02002045/**
Eric Anholt673a3942008-07-30 12:06:12 -07002046 * Ensures that all rendering to the object has completed and the object is
2047 * safe to unbind from the GTT or access from the CPU.
2048 */
Chris Wilson54cf91d2010-11-25 18:00:26 +00002049int
Chris Wilsonce453d82011-02-21 14:43:56 +00002050i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07002051{
Eric Anholt673a3942008-07-30 12:06:12 -07002052 int ret;
2053
Eric Anholte47c68e2008-11-14 13:35:19 -08002054 /* This function only exists to support waiting for existing rendering,
2055 * not for emitting required flushes.
Eric Anholt673a3942008-07-30 12:06:12 -07002056 */
Chris Wilson05394f32010-11-08 19:18:58 +00002057 BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
Eric Anholt673a3942008-07-30 12:06:12 -07002058
2059 /* If there is rendering queued on the buffer being evicted, wait for
2060 * it.
2061 */
Chris Wilson05394f32010-11-08 19:18:58 +00002062 if (obj->active) {
Ben Widawsky199b2bc2012-05-24 15:03:11 -07002063 ret = i915_wait_seqno(obj->ring, obj->last_rendering_seqno);
Chris Wilson2cf34d72010-09-14 13:03:28 +01002064 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07002065 return ret;
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07002066 i915_gem_retire_requests_ring(obj->ring);
Eric Anholt673a3942008-07-30 12:06:12 -07002067 }
2068
2069 return 0;
2070}
2071
Ben Widawsky5816d642012-04-11 11:18:19 -07002072/**
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002073 * Ensures that an object will eventually get non-busy by flushing any required
2074 * write domains, emitting any outstanding lazy request and retiring and
2075 * write domains, emitting any outstanding lazy request and retiring any
2076 */
2077static int
2078i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2079{
2080 int ret;
2081
2082 if (obj->active) {
2083 ret = i915_gem_object_flush_gpu_write_domain(obj);
2084 if (ret)
2085 return ret;
2086
2087 ret = i915_gem_check_olr(obj->ring,
2088 obj->last_rendering_seqno);
2089 if (ret)
2090 return ret;
2091 i915_gem_retire_requests_ring(obj->ring);
2092 }
2093
2094 return 0;
2095}
2096
2097/**
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002098 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2099 * @DRM_IOCTL_ARGS: standard ioctl arguments
2100 *
2101 * Returns 0 if successful, else an error is returned with the remaining time in
2102 * the timeout parameter.
2103 * -ETIME: object is still busy after timeout
2104 * -ERESTARTSYS: signal interrupted the wait
2105 * -ENOENT: object doesn't exist
2106 * Also possible, but rare:
2107 * -EAGAIN: GPU wedged
2108 * -ENOMEM: damn
2109 * -ENODEV: Internal IRQ fail
2110 * -E?: The add request failed
2111 *
2112 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2113 * non-zero timeout parameter the wait ioctl will wait for the given number of
2114 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2115 * without holding struct_mutex the object may become re-busied before this
2116 * function completes. A similar but shorter * race condition exists in the busy
2117 * function completes. A similar but shorter race condition exists in the busy
2118 * ioctl.
2119int
2120i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2121{
2122 struct drm_i915_gem_wait *args = data;
2123 struct drm_i915_gem_object *obj;
2124 struct intel_ring_buffer *ring = NULL;
Ben Widawskyeac1f142012-06-05 15:24:24 -07002125 struct timespec timeout_stack, *timeout = NULL;
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002126 u32 seqno = 0;
2127 int ret = 0;
2128
Ben Widawskyeac1f142012-06-05 15:24:24 -07002129 if (args->timeout_ns >= 0) {
2130 timeout_stack = ns_to_timespec(args->timeout_ns);
2131 timeout = &timeout_stack;
2132 }
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002133
2134 ret = i915_mutex_lock_interruptible(dev);
2135 if (ret)
2136 return ret;
2137
2138 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2139 if (&obj->base == NULL) {
2140 mutex_unlock(&dev->struct_mutex);
2141 return -ENOENT;
2142 }
2143
Daniel Vetter30dfebf2012-06-01 15:21:23 +02002144 /* Need to make sure the object gets inactive eventually. */
2145 ret = i915_gem_object_flush_active(obj);
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002146 if (ret)
2147 goto out;
2148
2149 if (obj->active) {
2150 seqno = obj->last_rendering_seqno;
2151 ring = obj->ring;
2152 }
2153
2154 if (seqno == 0)
2155 goto out;
2156
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002157 /* Do this after OLR check to make sure we make forward progress polling
2158 * on this IOCTL with a 0 timeout (like busy ioctl)
2159 */
2160 if (!args->timeout_ns) {
2161 ret = -ETIME;
2162 goto out;
2163 }
2164
2165 drm_gem_object_unreference(&obj->base);
2166 mutex_unlock(&dev->struct_mutex);
2167
Ben Widawskyeac1f142012-06-05 15:24:24 -07002168 ret = __wait_seqno(ring, seqno, true, timeout);
2169 if (timeout) {
2170 WARN_ON(!timespec_valid(timeout));
2171 args->timeout_ns = timespec_to_ns(timeout);
2172 }
Ben Widawsky23ba4fd2012-05-24 15:03:10 -07002173 return ret;
2174
2175out:
2176 drm_gem_object_unreference(&obj->base);
2177 mutex_unlock(&dev->struct_mutex);
2178 return ret;
2179}
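
/*
 * Illustrative userspace sketch (not part of the driver): the
 * busy-poll usage described above, where a zero timeout merely asks
 * whether the object is still busy. Assumes libdrm's drmIoctl(), an
 * open DRM fd and a GEM handle from the caller; kept inside #if 0 as
 * it is userspace code.
 */
#if 0
	struct drm_i915_gem_wait wait = {
		.bo_handle = handle,
		.timeout_ns = 0,
	};
	int busy = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) != 0 &&
		   errno == ETIME;
#endif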
2180
2181/**
Ben Widawsky5816d642012-04-11 11:18:19 -07002182 * i915_gem_object_sync - sync an object to a ring.
2183 *
2184 * @obj: object which may be in use on another ring.
2185 * @to: ring we wish to use the object on. May be NULL.
2186 *
2187 * This code is meant to abstract object synchronization with the GPU.
2188 * Calling with NULL implies synchronizing the object with the CPU
2189 * rather than a particular GPU ring.
2190 *
2191 * Returns 0 if successful, else propagates up the lower layer error.
2192 */
Ben Widawsky2911a352012-04-05 14:47:36 -07002193int
2194i915_gem_object_sync(struct drm_i915_gem_object *obj,
2195 struct intel_ring_buffer *to)
2196{
2197 struct intel_ring_buffer *from = obj->ring;
2198 u32 seqno;
2199 int ret, idx;
2200
2201 if (from == NULL || to == from)
2202 return 0;
2203
Ben Widawsky5816d642012-04-11 11:18:19 -07002204 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
Ben Widawsky2911a352012-04-05 14:47:36 -07002205 return i915_gem_object_wait_rendering(obj);
2206
2207 idx = intel_ring_sync_index(from, to);
2208
2209 seqno = obj->last_rendering_seqno;
2210 if (seqno <= from->sync_seqno[idx])
2211 return 0;
2212
Ben Widawskyb4aca012012-04-25 20:50:12 -07002213 ret = i915_gem_check_olr(obj->ring, seqno);
2214 if (ret)
2215 return ret;
Ben Widawsky2911a352012-04-05 14:47:36 -07002216
Ben Widawsky1500f7e2012-04-11 11:18:21 -07002217 ret = to->sync_to(to, from, seqno);
Ben Widawskye3a5a222012-04-11 11:18:20 -07002218 if (!ret)
2219 from->sync_seqno[idx] = seqno;
Ben Widawsky2911a352012-04-05 14:47:36 -07002220
Ben Widawskye3a5a222012-04-11 11:18:20 -07002221 return ret;
Ben Widawsky2911a352012-04-05 14:47:36 -07002222}
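
/*
 * Worked example (illustrative only): if the blitter already emitted
 * a semaphore wait for render-ring seqno 10, a later sync of an
 * object whose last_rendering_seqno is 8 returns early because
 * from->sync_seqno[idx] still covers it; only a newer seqno causes
 * another sync_to() to be emitted.
 */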
2223
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002224static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2225{
2226 u32 old_write_domain, old_read_domains;
2227
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002228 /* Act as a barrier for all accesses through the GTT */
2229 mb();
2230
2231 /* Force a pagefault for domain tracking on next user access */
2232 i915_gem_release_mmap(obj);
2233
Keith Packardb97c3d92011-06-24 21:02:59 -07002234 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2235 return;
2236
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002237 old_read_domains = obj->base.read_domains;
2238 old_write_domain = obj->base.write_domain;
2239
2240 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2241 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2242
2243 trace_i915_gem_object_change_domain(obj,
2244 old_read_domains,
2245 old_write_domain);
2246}
2247
Eric Anholt673a3942008-07-30 12:06:12 -07002248/**
2249 * Unbinds an object from the GTT aperture.
2250 */
Jesse Barnes0f973f22009-01-26 17:10:45 -08002251int
Chris Wilson05394f32010-11-08 19:18:58 +00002252i915_gem_object_unbind(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07002253{
Daniel Vetter7bddb012012-02-09 17:15:47 +01002254 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07002255 int ret = 0;
2256
Chris Wilson05394f32010-11-08 19:18:58 +00002257 if (obj->gtt_space == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07002258 return 0;
2259
Chris Wilson31d8d652012-05-24 19:11:20 +01002260 if (obj->pin_count)
2261 return -EBUSY;
Eric Anholt673a3942008-07-30 12:06:12 -07002262
Chris Wilsona8198ee2011-04-13 22:04:09 +01002263 ret = i915_gem_object_finish_gpu(obj);
Chris Wilson1488fc02012-04-24 15:47:31 +01002264 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07002265 return ret;
Chris Wilson8dc17752010-07-23 23:18:51 +01002266 /* Continue on if we fail due to EIO: the GPU is hung, so we
2267 * should be safe, and we need to clean up or else we might
2268 * cause memory corruption through use-after-free.
2269 */
Chris Wilsona8198ee2011-04-13 22:04:09 +01002270
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002271 i915_gem_object_finish_gtt(obj);
Chris Wilsona8198ee2011-04-13 22:04:09 +01002272
2273 /* Move the object to the CPU domain to ensure that
2274 * any possible CPU writes while it's not in the GTT
2275 * are flushed when we go to remap it.
2276 */
2277 if (ret == 0)
2278 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
2279 if (ret == -ERESTARTSYS)
2280 return ret;
Chris Wilson812ed4922010-09-30 15:08:57 +01002281 if (ret) {
Chris Wilsona8198ee2011-04-13 22:04:09 +01002282 /* In the event of a disaster, abandon all caches and
2283 * hope for the best.
2284 */
Chris Wilson812ed4922010-09-30 15:08:57 +01002285 i915_gem_clflush_object(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00002286 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
Chris Wilson812ed4922010-09-30 15:08:57 +01002287 }
Eric Anholt673a3942008-07-30 12:06:12 -07002288
Daniel Vetter96b47b62009-12-15 17:50:00 +01002289 /* release the fence reg _after_ flushing */
Chris Wilsond9e86c02010-11-10 16:40:20 +00002290 ret = i915_gem_object_put_fence(obj);
Chris Wilson1488fc02012-04-24 15:47:31 +01002291 if (ret)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002292 return ret;
Daniel Vetter96b47b62009-12-15 17:50:00 +01002293
Chris Wilsondb53a302011-02-03 11:57:46 +00002294 trace_i915_gem_object_unbind(obj);
2295
Daniel Vetter74898d72012-02-15 23:50:22 +01002296 if (obj->has_global_gtt_mapping)
2297 i915_gem_gtt_unbind_object(obj);
Daniel Vetter7bddb012012-02-09 17:15:47 +01002298 if (obj->has_aliasing_ppgtt_mapping) {
2299 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2300 obj->has_aliasing_ppgtt_mapping = 0;
2301 }
Daniel Vetter74163902012-02-15 23:50:21 +01002302 i915_gem_gtt_finish_object(obj);
Daniel Vetter7bddb012012-02-09 17:15:47 +01002303
Chris Wilsone5281cc2010-10-28 13:45:36 +01002304 i915_gem_object_put_pages_gtt(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002305
Chris Wilson6299f992010-11-24 12:23:44 +00002306 list_del_init(&obj->gtt_list);
Chris Wilson05394f32010-11-08 19:18:58 +00002307 list_del_init(&obj->mm_list);
Daniel Vetter75e9e912010-11-04 17:11:09 +01002308 /* Avoid an unnecessary call to unbind on rebind. */
Chris Wilson05394f32010-11-08 19:18:58 +00002309 obj->map_and_fenceable = true;
Eric Anholt673a3942008-07-30 12:06:12 -07002310
Chris Wilson05394f32010-11-08 19:18:58 +00002311 drm_mm_put_block(obj->gtt_space);
2312 obj->gtt_space = NULL;
2313 obj->gtt_offset = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07002314
Chris Wilson05394f32010-11-08 19:18:58 +00002315 if (i915_gem_object_is_purgeable(obj))
Chris Wilson963b4832009-09-20 23:03:54 +01002316 i915_gem_object_truncate(obj);
2317
Chris Wilson8dc17752010-07-23 23:18:51 +01002318 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002319}
2320
Chris Wilson88241782011-01-07 17:09:48 +00002321int
Chris Wilsondb53a302011-02-03 11:57:46 +00002322i915_gem_flush_ring(struct intel_ring_buffer *ring,
Chris Wilson54cf91d2010-11-25 18:00:26 +00002323 uint32_t invalidate_domains,
2324 uint32_t flush_domains)
2325{
Chris Wilson88241782011-01-07 17:09:48 +00002326 int ret;
2327
Chris Wilson36d527d2011-03-19 22:26:49 +00002328 if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
2329 return 0;
2330
Chris Wilsondb53a302011-02-03 11:57:46 +00002331 trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
2332
Chris Wilson88241782011-01-07 17:09:48 +00002333 ret = ring->flush(ring, invalidate_domains, flush_domains);
2334 if (ret)
2335 return ret;
2336
Chris Wilson36d527d2011-03-19 22:26:49 +00002337 if (flush_domains & I915_GEM_GPU_DOMAINS)
2338 i915_gem_process_flushing_list(ring, flush_domains);
2339
Chris Wilson88241782011-01-07 17:09:48 +00002340 return 0;
Chris Wilson54cf91d2010-11-25 18:00:26 +00002341}
2342
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07002343static int i915_ring_idle(struct intel_ring_buffer *ring)
Chris Wilsona56ba562010-09-28 10:07:56 +01002344{
Chris Wilson88241782011-01-07 17:09:48 +00002345 int ret;
2346
Chris Wilson395b70b2010-10-28 21:28:46 +01002347 if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
Chris Wilson64193402010-10-24 12:38:05 +01002348 return 0;
2349
Chris Wilson88241782011-01-07 17:09:48 +00002350 if (!list_empty(&ring->gpu_write_list)) {
Chris Wilsondb53a302011-02-03 11:57:46 +00002351 ret = i915_gem_flush_ring(ring,
Chris Wilson0ac74c62010-12-06 14:36:02 +00002352 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
Chris Wilson88241782011-01-07 17:09:48 +00002353 if (ret)
2354 return ret;
2355 }
2356
Ben Widawsky199b2bc2012-05-24 15:03:11 -07002357 return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
Chris Wilsona56ba562010-09-28 10:07:56 +01002358}
2359
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07002360int i915_gpu_idle(struct drm_device *dev)
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002361{
2362 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01002363 struct intel_ring_buffer *ring;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002364 int ret, i;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002365
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002366 /* Flush everything onto the inactive list. */
Chris Wilsonb4519512012-05-11 14:29:30 +01002367 for_each_ring(ring, dev_priv, i) {
2368 ret = i915_ring_idle(ring);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002369 if (ret)
2370 return ret;
Chris Wilsonb4519512012-05-11 14:29:30 +01002371
2372 /* Is the device fubar? */
2373 if (WARN_ON(!list_empty(&ring->gpu_write_list)))
2374 return -EBUSY;
Ben Widawskyf2ef6eb2012-06-04 14:42:53 -07002375
2376 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
2377 if (ret)
2378 return ret;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002379 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08002380
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01002381 return 0;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002382}
2383
Chris Wilson9ce079e2012-04-17 15:31:30 +01002384static void sandybridge_write_fence_reg(struct drm_device *dev, int reg,
2385 struct drm_i915_gem_object *obj)
Eric Anholt4e901fd2009-10-26 16:44:17 -07002386{
Eric Anholt4e901fd2009-10-26 16:44:17 -07002387 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt4e901fd2009-10-26 16:44:17 -07002388 uint64_t val;
2389
Chris Wilson9ce079e2012-04-17 15:31:30 +01002390 if (obj) {
2391 u32 size = obj->gtt_space->size;
Eric Anholt4e901fd2009-10-26 16:44:17 -07002392
Chris Wilson9ce079e2012-04-17 15:31:30 +01002393 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2394 0xfffff000) << 32;
2395 val |= obj->gtt_offset & 0xfffff000;
2396 val |= (uint64_t)((obj->stride / 128) - 1) <<
2397 SANDYBRIDGE_FENCE_PITCH_SHIFT;
Eric Anholt4e901fd2009-10-26 16:44:17 -07002398
Chris Wilson9ce079e2012-04-17 15:31:30 +01002399 if (obj->tiling_mode == I915_TILING_Y)
2400 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2401 val |= I965_FENCE_REG_VALID;
2402 } else
2403 val = 0;
Daniel Vetterc6642782010-11-12 13:46:18 +00002404
Chris Wilson9ce079e2012-04-17 15:31:30 +01002405 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
2406 POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
Eric Anholt4e901fd2009-10-26 16:44:17 -07002407}
2408
Chris Wilson9ce079e2012-04-17 15:31:30 +01002409static void i965_write_fence_reg(struct drm_device *dev, int reg,
2410 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002411{
Jesse Barnesde151cf2008-11-12 10:03:55 -08002412 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002413 uint64_t val;
2414
Chris Wilson9ce079e2012-04-17 15:31:30 +01002415 if (obj) {
2416 u32 size = obj->gtt_space->size;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002417
Chris Wilson9ce079e2012-04-17 15:31:30 +01002418 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2419 0xfffff000) << 32;
2420 val |= obj->gtt_offset & 0xfffff000;
2421 val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2422 if (obj->tiling_mode == I915_TILING_Y)
2423 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2424 val |= I965_FENCE_REG_VALID;
2425 } else
2426 val = 0;
Daniel Vetterc6642782010-11-12 13:46:18 +00002427
Chris Wilson9ce079e2012-04-17 15:31:30 +01002428 I915_WRITE64(FENCE_REG_965_0 + reg * 8, val);
2429 POSTING_READ(FENCE_REG_965_0 + reg * 8);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002430}
2431
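/*
 * Pre-965 fences are more constrained: the object must have a power-of-two
 * size and its start must be aligned to both 1MiB (512KiB on the 830 path
 * below) and its own size, and the pitch is stored as log2 of the stride in
 * tile widths.  For example, a 2048-byte stride with 512-byte tiles gives
 * pitch_val = ffs(2048 / 512) - 1 = 2.
 */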
Chris Wilson9ce079e2012-04-17 15:31:30 +01002432static void i915_write_fence_reg(struct drm_device *dev, int reg,
2433 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002434{
Jesse Barnesde151cf2008-11-12 10:03:55 -08002435 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson9ce079e2012-04-17 15:31:30 +01002436 u32 val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002437
Chris Wilson9ce079e2012-04-17 15:31:30 +01002438 if (obj) {
2439 u32 size = obj->gtt_space->size;
2440 int pitch_val;
2441 int tile_width;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002442
Chris Wilson9ce079e2012-04-17 15:31:30 +01002443 WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2444 (size & -size) != size ||
2445 (obj->gtt_offset & (size - 1)),
2446 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2447 obj->gtt_offset, obj->map_and_fenceable, size);
2448
2449 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2450 tile_width = 128;
2451 else
2452 tile_width = 512;
2453
 2454	/* Note: the pitch had better be a power of two tile widths */
2455 pitch_val = obj->stride / tile_width;
2456 pitch_val = ffs(pitch_val) - 1;
2457
2458 val = obj->gtt_offset;
2459 if (obj->tiling_mode == I915_TILING_Y)
2460 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2461 val |= I915_FENCE_SIZE_BITS(size);
2462 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2463 val |= I830_FENCE_REG_VALID;
2464 } else
2465 val = 0;
2466
2467 if (reg < 8)
2468 reg = FENCE_REG_830_0 + reg * 4;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002469 else
Chris Wilson9ce079e2012-04-17 15:31:30 +01002470 reg = FENCE_REG_945_8 + (reg - 8) * 4;
Jesse Barnes0f973f22009-01-26 17:10:45 -08002471
Chris Wilson9ce079e2012-04-17 15:31:30 +01002472 I915_WRITE(reg, val);
2473 POSTING_READ(reg);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002474}
2475
Chris Wilson9ce079e2012-04-17 15:31:30 +01002476static void i830_write_fence_reg(struct drm_device *dev, int reg,
2477 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002478{
Jesse Barnesde151cf2008-11-12 10:03:55 -08002479 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002480 uint32_t val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002481
Chris Wilson9ce079e2012-04-17 15:31:30 +01002482 if (obj) {
2483 u32 size = obj->gtt_space->size;
2484 uint32_t pitch_val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002485
Chris Wilson9ce079e2012-04-17 15:31:30 +01002486 WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2487 (size & -size) != size ||
2488 (obj->gtt_offset & (size - 1)),
2489 "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2490 obj->gtt_offset, size);
Eric Anholte76a16d2009-05-26 17:44:56 -07002491
Chris Wilson9ce079e2012-04-17 15:31:30 +01002492 pitch_val = obj->stride / 128;
2493 pitch_val = ffs(pitch_val) - 1;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002494
Chris Wilson9ce079e2012-04-17 15:31:30 +01002495 val = obj->gtt_offset;
2496 if (obj->tiling_mode == I915_TILING_Y)
2497 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2498 val |= I830_FENCE_SIZE_BITS(size);
2499 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2500 val |= I830_FENCE_REG_VALID;
2501 } else
2502 val = 0;
Daniel Vetterc6642782010-11-12 13:46:18 +00002503
Chris Wilson9ce079e2012-04-17 15:31:30 +01002504 I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2505 POSTING_READ(FENCE_REG_830_0 + reg * 4);
2506}
2507
2508static void i915_gem_write_fence(struct drm_device *dev, int reg,
2509 struct drm_i915_gem_object *obj)
2510{
2511 switch (INTEL_INFO(dev)->gen) {
2512 case 7:
2513 case 6: sandybridge_write_fence_reg(dev, reg, obj); break;
2514 case 5:
2515 case 4: i965_write_fence_reg(dev, reg, obj); break;
2516 case 3: i915_write_fence_reg(dev, reg, obj); break;
2517 case 2: i830_write_fence_reg(dev, reg, obj); break;
2518 default: break;
2519 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08002520}
2521
Chris Wilson61050802012-04-17 15:31:31 +01002522static inline int fence_number(struct drm_i915_private *dev_priv,
2523 struct drm_i915_fence_reg *fence)
2524{
2525 return fence - dev_priv->fence_regs;
2526}
2527
2528static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2529 struct drm_i915_fence_reg *fence,
2530 bool enable)
2531{
2532 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2533 int reg = fence_number(dev_priv, fence);
2534
2535 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
2536
2537 if (enable) {
2538 obj->fence_reg = reg;
2539 fence->obj = obj;
2540 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2541 } else {
2542 obj->fence_reg = I915_FENCE_REG_NONE;
2543 fence->obj = NULL;
2544 list_del_init(&fence->lru_list);
2545 }
2546}
2547
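/*
 * Before a fence register can be reassigned or cleared, any GPU access
 * still going through the old fence has to finish: flush outstanding GPU
 * writes, wait for the last fenced seqno, and issue a memory barrier so
 * CPU accesses through the GTT are ordered against the register update.
 */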
Chris Wilsond9e86c02010-11-10 16:40:20 +00002548static int
Chris Wilsona360bb12012-04-17 15:31:25 +01002549i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002550{
2551 int ret;
2552
2553 if (obj->fenced_gpu_access) {
Chris Wilson88241782011-01-07 17:09:48 +00002554 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
Chris Wilson1c293ea2012-04-17 15:31:27 +01002555 ret = i915_gem_flush_ring(obj->ring,
Chris Wilson88241782011-01-07 17:09:48 +00002556 0, obj->base.write_domain);
2557 if (ret)
2558 return ret;
2559 }
Chris Wilsond9e86c02010-11-10 16:40:20 +00002560
2561 obj->fenced_gpu_access = false;
2562 }
2563
Chris Wilson1c293ea2012-04-17 15:31:27 +01002564 if (obj->last_fenced_seqno) {
Ben Widawsky199b2bc2012-05-24 15:03:11 -07002565 ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
Chris Wilson18991842012-04-17 15:31:29 +01002566 if (ret)
2567 return ret;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002568
2569 obj->last_fenced_seqno = 0;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002570 }
2571
Chris Wilson63256ec2011-01-04 18:42:07 +00002572 /* Ensure that all CPU reads are completed before installing a fence
2573 * and all writes before removing the fence.
2574 */
2575 if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
2576 mb();
2577
Chris Wilsond9e86c02010-11-10 16:40:20 +00002578 return 0;
2579}
2580
2581int
2582i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2583{
Chris Wilson61050802012-04-17 15:31:31 +01002584 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002585 int ret;
2586
Chris Wilsona360bb12012-04-17 15:31:25 +01002587 ret = i915_gem_object_flush_fence(obj);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002588 if (ret)
2589 return ret;
2590
Chris Wilson61050802012-04-17 15:31:31 +01002591 if (obj->fence_reg == I915_FENCE_REG_NONE)
2592 return 0;
Chris Wilson1690e1e2011-12-14 13:57:08 +01002593
Chris Wilson61050802012-04-17 15:31:31 +01002594 i915_gem_object_update_fence(obj,
2595 &dev_priv->fence_regs[obj->fence_reg],
2596 false);
2597 i915_gem_object_fence_lost(obj);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002598
2599 return 0;
2600}
2601
2602static struct drm_i915_fence_reg *
Chris Wilsona360bb12012-04-17 15:31:25 +01002603i915_find_fence_reg(struct drm_device *dev)
Daniel Vetterae3db242010-02-19 11:51:58 +01002604{
Daniel Vetterae3db242010-02-19 11:51:58 +01002605 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson8fe301a2012-04-17 15:31:28 +01002606 struct drm_i915_fence_reg *reg, *avail;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002607 int i;
Daniel Vetterae3db242010-02-19 11:51:58 +01002608
2609 /* First try to find a free reg */
Chris Wilsond9e86c02010-11-10 16:40:20 +00002610 avail = NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01002611 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2612 reg = &dev_priv->fence_regs[i];
2613 if (!reg->obj)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002614 return reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01002615
Chris Wilson1690e1e2011-12-14 13:57:08 +01002616 if (!reg->pin_count)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002617 avail = reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01002618 }
2619
Chris Wilsond9e86c02010-11-10 16:40:20 +00002620 if (avail == NULL)
2621 return NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01002622
2623 /* None available, try to steal one or wait for a user to finish */
Chris Wilsond9e86c02010-11-10 16:40:20 +00002624 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
Chris Wilson1690e1e2011-12-14 13:57:08 +01002625 if (reg->pin_count)
Daniel Vetterae3db242010-02-19 11:51:58 +01002626 continue;
2627
Chris Wilson8fe301a2012-04-17 15:31:28 +01002628 return reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01002629 }
2630
Chris Wilson8fe301a2012-04-17 15:31:28 +01002631 return NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01002632}
2633
Jesse Barnesde151cf2008-11-12 10:03:55 -08002634/**
Chris Wilson9a5a53b2012-03-22 15:10:00 +00002635 * i915_gem_object_get_fence - set up fencing for an object
Jesse Barnesde151cf2008-11-12 10:03:55 -08002636 * @obj: object to map through a fence reg
2637 *
2638 * When mapping objects through the GTT, userspace wants to be able to write
2639 * to them without having to worry about swizzling if the object is tiled.
Jesse Barnesde151cf2008-11-12 10:03:55 -08002640 * This function walks the fence regs looking for a free one for @obj,
2641 * stealing one if it can't find any.
2642 *
2643 * It then sets up the reg based on the object's properties: address, pitch
2644 * and tiling format.
Chris Wilson9a5a53b2012-03-22 15:10:00 +00002645 *
2646 * For an untiled surface, this removes any existing fence.
Jesse Barnesde151cf2008-11-12 10:03:55 -08002647 */
Chris Wilson8c4b8c32009-06-17 22:08:52 +01002648int
Chris Wilson06d98132012-04-17 15:31:24 +01002649i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002650{
Chris Wilson05394f32010-11-08 19:18:58 +00002651 struct drm_device *dev = obj->base.dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08002652 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson14415742012-04-17 15:31:33 +01002653 bool enable = obj->tiling_mode != I915_TILING_NONE;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002654 struct drm_i915_fence_reg *reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01002655 int ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002656
Chris Wilson14415742012-04-17 15:31:33 +01002657	/* Have we updated the tiling parameters on the object such that we
 2658	 * need to serialise the write to the associated fence register?
 2659	 */
Chris Wilson5d82e3e2012-04-21 16:23:23 +01002660 if (obj->fence_dirty) {
Chris Wilson14415742012-04-17 15:31:33 +01002661 ret = i915_gem_object_flush_fence(obj);
2662 if (ret)
2663 return ret;
2664 }
Chris Wilson9a5a53b2012-03-22 15:10:00 +00002665
Chris Wilsond9e86c02010-11-10 16:40:20 +00002666 /* Just update our place in the LRU if our fence is getting reused. */
Chris Wilson05394f32010-11-08 19:18:58 +00002667 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2668 reg = &dev_priv->fence_regs[obj->fence_reg];
Chris Wilson5d82e3e2012-04-21 16:23:23 +01002669 if (!obj->fence_dirty) {
Chris Wilson14415742012-04-17 15:31:33 +01002670 list_move_tail(&reg->lru_list,
2671 &dev_priv->mm.fence_list);
2672 return 0;
2673 }
2674 } else if (enable) {
2675 reg = i915_find_fence_reg(dev);
2676 if (reg == NULL)
2677 return -EDEADLK;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002678
Chris Wilson14415742012-04-17 15:31:33 +01002679 if (reg->obj) {
2680 struct drm_i915_gem_object *old = reg->obj;
2681
2682 ret = i915_gem_object_flush_fence(old);
Chris Wilson29c5a582011-03-17 15:23:22 +00002683 if (ret)
2684 return ret;
2685
Chris Wilson14415742012-04-17 15:31:33 +01002686 i915_gem_object_fence_lost(old);
Chris Wilson29c5a582011-03-17 15:23:22 +00002687 }
Chris Wilson14415742012-04-17 15:31:33 +01002688 } else
Eric Anholta09ba7f2009-08-29 12:49:51 -07002689 return 0;
Eric Anholta09ba7f2009-08-29 12:49:51 -07002690
Chris Wilson14415742012-04-17 15:31:33 +01002691 i915_gem_object_update_fence(obj, reg, enable);
Chris Wilson5d82e3e2012-04-21 16:23:23 +01002692 obj->fence_dirty = false;
Chris Wilson14415742012-04-17 15:31:33 +01002693
Chris Wilson9ce079e2012-04-17 15:31:30 +01002694 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002695}
2696
2697/**
Eric Anholt673a3942008-07-30 12:06:12 -07002698 * Finds free space in the GTT aperture and binds the object there.
2699 */
2700static int
Chris Wilson05394f32010-11-08 19:18:58 +00002701i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
Daniel Vetter920afa72010-09-16 17:54:23 +02002702 unsigned alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01002703 bool map_and_fenceable)
Eric Anholt673a3942008-07-30 12:06:12 -07002704{
Chris Wilson05394f32010-11-08 19:18:58 +00002705 struct drm_device *dev = obj->base.dev;
Eric Anholt673a3942008-07-30 12:06:12 -07002706 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07002707 struct drm_mm_node *free_space;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002708 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
Daniel Vetter5e783302010-11-14 22:32:36 +01002709 u32 size, fence_size, fence_alignment, unfenced_alignment;
Daniel Vetter75e9e912010-11-04 17:11:09 +01002710 bool mappable, fenceable;
Chris Wilson07f73f62009-09-14 16:50:30 +01002711 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002712
Chris Wilson05394f32010-11-08 19:18:58 +00002713 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilson3ef94da2009-09-14 16:50:29 +01002714 DRM_ERROR("Attempting to bind a purgeable object\n");
2715 return -EINVAL;
2716 }
2717
Chris Wilsone28f8712011-07-18 13:11:49 -07002718 fence_size = i915_gem_get_gtt_size(dev,
2719 obj->base.size,
2720 obj->tiling_mode);
2721 fence_alignment = i915_gem_get_gtt_alignment(dev,
2722 obj->base.size,
2723 obj->tiling_mode);
2724 unfenced_alignment =
2725 i915_gem_get_unfenced_gtt_alignment(dev,
2726 obj->base.size,
2727 obj->tiling_mode);
Chris Wilsona00b10c2010-09-24 21:15:47 +01002728
Eric Anholt673a3942008-07-30 12:06:12 -07002729 if (alignment == 0)
Daniel Vetter5e783302010-11-14 22:32:36 +01002730 alignment = map_and_fenceable ? fence_alignment :
2731 unfenced_alignment;
Daniel Vetter75e9e912010-11-04 17:11:09 +01002732 if (map_and_fenceable && alignment & (fence_alignment - 1)) {
Eric Anholt673a3942008-07-30 12:06:12 -07002733 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2734 return -EINVAL;
2735 }
2736
Chris Wilson05394f32010-11-08 19:18:58 +00002737 size = map_and_fenceable ? fence_size : obj->base.size;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002738
Chris Wilson654fc602010-05-27 13:18:21 +01002739 /* If the object is bigger than the entire aperture, reject it early
2740 * before evicting everything in a vain attempt to find space.
2741 */
Chris Wilson05394f32010-11-08 19:18:58 +00002742 if (obj->base.size >
Daniel Vetter75e9e912010-11-04 17:11:09 +01002743 (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
Chris Wilson654fc602010-05-27 13:18:21 +01002744 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2745 return -E2BIG;
2746 }
2747
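	/*
	 * Allocation strategy, roughly: look for a hole in the GTT (restricted
	 * to the mappable aperture when map_and_fenceable is set), evicting and
	 * retrying if nothing fits.  If the backing pages then fail to allocate,
	 * evict everything from the GTT and retry; should even that fail, retry
	 * once more with the __GFP_NORETRY | __GFP_NOWARN flags dropped before
	 * giving up with -ENOMEM.
	 */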
Eric Anholt673a3942008-07-30 12:06:12 -07002748 search_free:
Daniel Vetter75e9e912010-11-04 17:11:09 +01002749 if (map_and_fenceable)
Daniel Vetter920afa72010-09-16 17:54:23 +02002750 free_space =
2751 drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
Chris Wilson6b9d89b2012-07-10 11:15:23 +01002752 size, alignment,
2753 0, dev_priv->mm.gtt_mappable_end,
Daniel Vetter920afa72010-09-16 17:54:23 +02002754 0);
2755 else
2756 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
Chris Wilsona00b10c2010-09-24 21:15:47 +01002757 size, alignment, 0);
Daniel Vetter920afa72010-09-16 17:54:23 +02002758
2759 if (free_space != NULL) {
Daniel Vetter75e9e912010-11-04 17:11:09 +01002760 if (map_and_fenceable)
Chris Wilson05394f32010-11-08 19:18:58 +00002761 obj->gtt_space =
Daniel Vetter920afa72010-09-16 17:54:23 +02002762 drm_mm_get_block_range_generic(free_space,
Chris Wilsona00b10c2010-09-24 21:15:47 +01002763 size, alignment, 0,
Chris Wilson6b9d89b2012-07-10 11:15:23 +01002764 0, dev_priv->mm.gtt_mappable_end,
Daniel Vetter920afa72010-09-16 17:54:23 +02002765 0);
2766 else
Chris Wilson05394f32010-11-08 19:18:58 +00002767 obj->gtt_space =
Chris Wilsona00b10c2010-09-24 21:15:47 +01002768 drm_mm_get_block(free_space, size, alignment);
Daniel Vetter920afa72010-09-16 17:54:23 +02002769 }
Chris Wilson05394f32010-11-08 19:18:58 +00002770 if (obj->gtt_space == NULL) {
Eric Anholt673a3942008-07-30 12:06:12 -07002771 /* If the gtt is empty and we're still having trouble
2772 * fitting our object in, we're out of memory.
2773 */
Daniel Vetter75e9e912010-11-04 17:11:09 +01002774 ret = i915_gem_evict_something(dev, size, alignment,
2775 map_and_fenceable);
Chris Wilson97311292009-09-21 00:22:34 +01002776 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07002777 return ret;
Chris Wilson97311292009-09-21 00:22:34 +01002778
Eric Anholt673a3942008-07-30 12:06:12 -07002779 goto search_free;
2780 }
2781
Chris Wilsone5281cc2010-10-28 13:45:36 +01002782 ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
Eric Anholt673a3942008-07-30 12:06:12 -07002783 if (ret) {
Chris Wilson05394f32010-11-08 19:18:58 +00002784 drm_mm_put_block(obj->gtt_space);
2785 obj->gtt_space = NULL;
Chris Wilson07f73f62009-09-14 16:50:30 +01002786
2787 if (ret == -ENOMEM) {
Chris Wilson809b6332011-01-10 17:33:15 +00002788 /* first try to reclaim some memory by clearing the GTT */
2789 ret = i915_gem_evict_everything(dev, false);
Chris Wilson07f73f62009-09-14 16:50:30 +01002790 if (ret) {
Chris Wilson07f73f62009-09-14 16:50:30 +01002791 /* now try to shrink everyone else */
Chris Wilson4bdadb92010-01-27 13:36:32 +00002792 if (gfpmask) {
2793 gfpmask = 0;
2794 goto search_free;
Chris Wilson07f73f62009-09-14 16:50:30 +01002795 }
2796
Chris Wilson809b6332011-01-10 17:33:15 +00002797 return -ENOMEM;
Chris Wilson07f73f62009-09-14 16:50:30 +01002798 }
2799
2800 goto search_free;
2801 }
2802
Eric Anholt673a3942008-07-30 12:06:12 -07002803 return ret;
2804 }
2805
Daniel Vetter74163902012-02-15 23:50:21 +01002806 ret = i915_gem_gtt_prepare_object(obj);
Daniel Vetter7c2e6fd2010-11-06 10:10:47 +01002807 if (ret) {
Chris Wilsone5281cc2010-10-28 13:45:36 +01002808 i915_gem_object_put_pages_gtt(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00002809 drm_mm_put_block(obj->gtt_space);
2810 obj->gtt_space = NULL;
Chris Wilson07f73f62009-09-14 16:50:30 +01002811
Chris Wilson809b6332011-01-10 17:33:15 +00002812 if (i915_gem_evict_everything(dev, false))
Chris Wilson07f73f62009-09-14 16:50:30 +01002813 return ret;
Chris Wilson07f73f62009-09-14 16:50:30 +01002814
2815 goto search_free;
Eric Anholt673a3942008-07-30 12:06:12 -07002816 }
Eric Anholt673a3942008-07-30 12:06:12 -07002817
Daniel Vetter0ebb9822012-02-15 23:50:24 +01002818 if (!dev_priv->mm.aliasing_ppgtt)
2819 i915_gem_gtt_bind_object(obj, obj->cache_level);
Eric Anholt673a3942008-07-30 12:06:12 -07002820
Chris Wilson6299f992010-11-24 12:23:44 +00002821 list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
Chris Wilson05394f32010-11-08 19:18:58 +00002822 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
Chris Wilsonbf1a1092010-08-07 11:01:20 +01002823
Eric Anholt673a3942008-07-30 12:06:12 -07002824 /* Assert that the object is not currently in any GPU domain. As it
2825 * wasn't in the GTT, there shouldn't be any way it could have been in
2826 * a GPU cache
2827 */
Chris Wilson05394f32010-11-08 19:18:58 +00002828 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2829 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
Eric Anholt673a3942008-07-30 12:06:12 -07002830
Chris Wilson6299f992010-11-24 12:23:44 +00002831 obj->gtt_offset = obj->gtt_space->start;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002832
Daniel Vetter75e9e912010-11-04 17:11:09 +01002833 fenceable =
Chris Wilson05394f32010-11-08 19:18:58 +00002834 obj->gtt_space->size == fence_size &&
Akshay Joshi0206e352011-08-16 15:34:10 -04002835 (obj->gtt_space->start & (fence_alignment - 1)) == 0;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002836
Daniel Vetter75e9e912010-11-04 17:11:09 +01002837 mappable =
Chris Wilson05394f32010-11-08 19:18:58 +00002838 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002839
Chris Wilson05394f32010-11-08 19:18:58 +00002840 obj->map_and_fenceable = mappable && fenceable;
Daniel Vetter75e9e912010-11-04 17:11:09 +01002841
Chris Wilsondb53a302011-02-03 11:57:46 +00002842 trace_i915_gem_object_bind(obj, map_and_fenceable);
Eric Anholt673a3942008-07-30 12:06:12 -07002843 return 0;
2844}
2845
2846void
Chris Wilson05394f32010-11-08 19:18:58 +00002847i915_gem_clflush_object(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07002848{
Eric Anholt673a3942008-07-30 12:06:12 -07002849 /* If we don't have a page list set up, then we're not pinned
2850 * to GPU, and we can ignore the cache flush because it'll happen
2851 * again at bind time.
2852 */
Chris Wilson05394f32010-11-08 19:18:58 +00002853 if (obj->pages == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07002854 return;
2855
Chris Wilson9c23f7f2011-03-29 16:59:52 -07002856 /* If the GPU is snooping the contents of the CPU cache,
2857 * we do not need to manually clear the CPU cache lines. However,
2858 * the caches are only snooped when the render cache is
2859 * flushed/invalidated. As we always have to emit invalidations
2860 * and flushes when moving into and out of the RENDER domain, correct
2861 * snooping behaviour occurs naturally as the result of our domain
2862 * tracking.
2863 */
2864 if (obj->cache_level != I915_CACHE_NONE)
2865 return;
2866
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002867 trace_i915_gem_object_clflush(obj);
Eric Anholtcfa16a02009-05-26 18:46:16 -07002868
Chris Wilson05394f32010-11-08 19:18:58 +00002869 drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
Eric Anholt673a3942008-07-30 12:06:12 -07002870}
2871
Eric Anholte47c68e2008-11-14 13:35:19 -08002872/** Flushes any GPU write domain for the object if it's dirty. */
Chris Wilson88241782011-01-07 17:09:48 +00002873static int
Chris Wilson3619df02010-11-28 15:37:17 +00002874i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08002875{
Chris Wilson05394f32010-11-08 19:18:58 +00002876 if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
Chris Wilson88241782011-01-07 17:09:48 +00002877 return 0;
Eric Anholte47c68e2008-11-14 13:35:19 -08002878
2879 /* Queue the GPU write cache flushing we need. */
Chris Wilsondb53a302011-02-03 11:57:46 +00002880 return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002881}
2882
2883/** Flushes the GTT write domain for the object if it's dirty. */
2884static void
Chris Wilson05394f32010-11-08 19:18:58 +00002885i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08002886{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002887 uint32_t old_write_domain;
2888
Chris Wilson05394f32010-11-08 19:18:58 +00002889 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
Eric Anholte47c68e2008-11-14 13:35:19 -08002890 return;
2891
Chris Wilson63256ec2011-01-04 18:42:07 +00002892 /* No actual flushing is required for the GTT write domain. Writes
Eric Anholte47c68e2008-11-14 13:35:19 -08002893 * to it immediately go to main memory as far as we know, so there's
2894 * no chipset flush. It also doesn't land in render cache.
Chris Wilson63256ec2011-01-04 18:42:07 +00002895 *
2896 * However, we do have to enforce the order so that all writes through
2897 * the GTT land before any writes to the device, such as updates to
2898 * the GATT itself.
Eric Anholte47c68e2008-11-14 13:35:19 -08002899 */
Chris Wilson63256ec2011-01-04 18:42:07 +00002900 wmb();
2901
Chris Wilson05394f32010-11-08 19:18:58 +00002902 old_write_domain = obj->base.write_domain;
2903 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002904
2905 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00002906 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002907 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002908}
2909
2910/** Flushes the CPU write domain for the object if it's dirty. */
2911static void
Chris Wilson05394f32010-11-08 19:18:58 +00002912i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08002913{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002914 uint32_t old_write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002915
Chris Wilson05394f32010-11-08 19:18:58 +00002916 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
Eric Anholte47c68e2008-11-14 13:35:19 -08002917 return;
2918
2919 i915_gem_clflush_object(obj);
Daniel Vetter40ce6572010-11-05 18:12:18 +01002920 intel_gtt_chipset_flush();
Chris Wilson05394f32010-11-08 19:18:58 +00002921 old_write_domain = obj->base.write_domain;
2922 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002923
2924 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00002925 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002926 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002927}
2928
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002929/**
2930 * Moves a single object to the GTT read, and possibly write domain.
2931 *
2932 * This function returns when the move is complete, including waiting on
2933 * flushes to occur.
2934 */
Jesse Barnes79e53942008-11-07 14:24:08 -08002935int
Chris Wilson20217462010-11-23 15:26:33 +00002936i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002937{
Chris Wilson8325a092012-04-24 15:52:35 +01002938 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002939 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08002940 int ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002941
Eric Anholt02354392008-11-26 13:58:13 -08002942 /* Not valid to be called on unbound objects. */
Chris Wilson05394f32010-11-08 19:18:58 +00002943 if (obj->gtt_space == NULL)
Eric Anholt02354392008-11-26 13:58:13 -08002944 return -EINVAL;
2945
Chris Wilson8d7e3de2011-02-07 15:23:02 +00002946 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
2947 return 0;
2948
Chris Wilson88241782011-01-07 17:09:48 +00002949 ret = i915_gem_object_flush_gpu_write_domain(obj);
2950 if (ret)
2951 return ret;
2952
Chris Wilson87ca9c82010-12-02 09:42:56 +00002953 if (obj->pending_gpu_write || write) {
Chris Wilsonce453d82011-02-21 14:43:56 +00002954 ret = i915_gem_object_wait_rendering(obj);
Chris Wilson87ca9c82010-12-02 09:42:56 +00002955 if (ret)
2956 return ret;
2957 }
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002958
Chris Wilson72133422010-09-13 23:56:38 +01002959 i915_gem_object_flush_cpu_write_domain(obj);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002960
Chris Wilson05394f32010-11-08 19:18:58 +00002961 old_write_domain = obj->base.write_domain;
2962 old_read_domains = obj->base.read_domains;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002963
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002964 /* It should now be out of any other write domains, and we can update
2965 * the domain values for our changes.
2966 */
Chris Wilson05394f32010-11-08 19:18:58 +00002967 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2968 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08002969 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00002970 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
2971 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
2972 obj->dirty = 1;
Eric Anholte47c68e2008-11-14 13:35:19 -08002973 }
2974
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002975 trace_i915_gem_object_change_domain(obj,
2976 old_read_domains,
2977 old_write_domain);
2978
Chris Wilson8325a092012-04-24 15:52:35 +01002979 /* And bump the LRU for this access */
2980 if (i915_gem_object_is_inactive(obj))
2981 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2982
Eric Anholte47c68e2008-11-14 13:35:19 -08002983 return 0;
2984}
2985
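/*
 * Changing the cache level of a bound object means touching its PTEs:
 * GPU access is finished first, any fence is dropped on pre-Sandybridge
 * hardware (fences cannot be used with snooped memory there), and the
 * object is rebound in the global GTT and, if present, the aliasing ppGTT
 * with the new cache level.  Moving to uncached also pulls the object back
 * into the CPU domain.
 */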
Chris Wilsone4ffd172011-04-04 09:44:39 +01002986int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
2987 enum i915_cache_level cache_level)
2988{
Daniel Vetter7bddb012012-02-09 17:15:47 +01002989 struct drm_device *dev = obj->base.dev;
2990 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsone4ffd172011-04-04 09:44:39 +01002991 int ret;
2992
2993 if (obj->cache_level == cache_level)
2994 return 0;
2995
2996 if (obj->pin_count) {
2997 DRM_DEBUG("can not change the cache level of pinned objects\n");
2998 return -EBUSY;
2999 }
3000
3001 if (obj->gtt_space) {
3002 ret = i915_gem_object_finish_gpu(obj);
3003 if (ret)
3004 return ret;
3005
3006 i915_gem_object_finish_gtt(obj);
3007
3008 /* Before SandyBridge, you could not use tiling or fence
3009 * registers with snooped memory, so relinquish any fences
3010 * currently pointing to our region in the aperture.
3011 */
3012 if (INTEL_INFO(obj->base.dev)->gen < 6) {
3013 ret = i915_gem_object_put_fence(obj);
3014 if (ret)
3015 return ret;
3016 }
3017
Daniel Vetter74898d72012-02-15 23:50:22 +01003018 if (obj->has_global_gtt_mapping)
3019 i915_gem_gtt_bind_object(obj, cache_level);
Daniel Vetter7bddb012012-02-09 17:15:47 +01003020 if (obj->has_aliasing_ppgtt_mapping)
3021 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
3022 obj, cache_level);
Chris Wilsone4ffd172011-04-04 09:44:39 +01003023 }
3024
3025 if (cache_level == I915_CACHE_NONE) {
3026 u32 old_read_domains, old_write_domain;
3027
3028 /* If we're coming from LLC cached, then we haven't
3029 * actually been tracking whether the data is in the
3030 * CPU cache or not, since we only allow one bit set
3031 * in obj->write_domain and have been skipping the clflushes.
3032 * Just set it to the CPU cache for now.
3033 */
3034 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3035 WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
3036
3037 old_read_domains = obj->base.read_domains;
3038 old_write_domain = obj->base.write_domain;
3039
3040 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3041 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3042
3043 trace_i915_gem_object_change_domain(obj,
3044 old_read_domains,
3045 old_write_domain);
3046 }
3047
3048 obj->cache_level = cache_level;
3049 return 0;
3050}
3051
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003052/*
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003053 * Prepare buffer for display plane (scanout, cursors, etc).
3054 * Can be called from an uninterruptible phase (modesetting) and allows
3055 * any flushes to be pipelined (for pageflips).
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003056 */
3057int
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003058i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3059 u32 alignment,
Chris Wilson919926a2010-11-12 13:42:53 +00003060 struct intel_ring_buffer *pipelined)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003061{
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003062 u32 old_read_domains, old_write_domain;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003063 int ret;
3064
Chris Wilson88241782011-01-07 17:09:48 +00003065 ret = i915_gem_object_flush_gpu_write_domain(obj);
3066 if (ret)
3067 return ret;
3068
Chris Wilson0be73282010-12-06 14:36:27 +00003069 if (pipelined != obj->ring) {
Ben Widawsky2911a352012-04-05 14:47:36 -07003070 ret = i915_gem_object_sync(obj, pipelined);
3071 if (ret)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003072 return ret;
3073 }
3074
Eric Anholta7ef0642011-03-29 16:59:54 -07003075 /* The display engine is not coherent with the LLC cache on gen6. As
3076 * a result, we make sure that the pinning that is about to occur is
3077 * done with uncached PTEs. This is lowest common denominator for all
3078 * chipsets.
3079 *
3080 * However for gen6+, we could do better by using the GFDT bit instead
3081 * of uncaching, which would allow us to flush all the LLC-cached data
3082 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3083 */
3084 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
3085 if (ret)
3086 return ret;
3087
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003088 /* As the user may map the buffer once pinned in the display plane
3089 * (e.g. libkms for the bootup splash), we have to ensure that we
3090 * always use map_and_fenceable for all scanout buffers.
3091 */
3092 ret = i915_gem_object_pin(obj, alignment, true);
3093 if (ret)
3094 return ret;
3095
Chris Wilsonb118c1e2010-05-27 13:18:14 +01003096 i915_gem_object_flush_cpu_write_domain(obj);
3097
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003098 old_write_domain = obj->base.write_domain;
Chris Wilson05394f32010-11-08 19:18:58 +00003099 old_read_domains = obj->base.read_domains;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003100
3101 /* It should now be out of any other write domains, and we can update
3102 * the domain values for our changes.
3103 */
3104 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
Chris Wilson05394f32010-11-08 19:18:58 +00003105 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003106
3107 trace_i915_gem_object_change_domain(obj,
3108 old_read_domains,
Chris Wilson2da3b9b2011-04-14 09:41:17 +01003109 old_write_domain);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08003110
3111 return 0;
3112}
3113
Chris Wilson85345512010-11-13 09:49:11 +00003114int
Chris Wilsona8198ee2011-04-13 22:04:09 +01003115i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
Chris Wilson85345512010-11-13 09:49:11 +00003116{
Chris Wilson88241782011-01-07 17:09:48 +00003117 int ret;
3118
Chris Wilsona8198ee2011-04-13 22:04:09 +01003119 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
Chris Wilson85345512010-11-13 09:49:11 +00003120 return 0;
3121
Chris Wilson88241782011-01-07 17:09:48 +00003122 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
Chris Wilsondb53a302011-02-03 11:57:46 +00003123 ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
Chris Wilson88241782011-01-07 17:09:48 +00003124 if (ret)
3125 return ret;
3126 }
Chris Wilson85345512010-11-13 09:49:11 +00003127
Chris Wilsonc501ae72011-12-14 13:57:23 +01003128 ret = i915_gem_object_wait_rendering(obj);
3129 if (ret)
3130 return ret;
3131
Chris Wilsona8198ee2011-04-13 22:04:09 +01003132 /* Ensure that we invalidate the GPU's caches and TLBs. */
3133 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
Chris Wilsonc501ae72011-12-14 13:57:23 +01003134 return 0;
Chris Wilson85345512010-11-13 09:49:11 +00003135}
3136
Eric Anholte47c68e2008-11-14 13:35:19 -08003137/**
3138 * Moves a single object to the CPU read, and possibly write domain.
3139 *
3140 * This function returns when the move is complete, including waiting on
3141 * flushes to occur.
3142 */
Chris Wilsondabdfe02012-03-26 10:10:27 +02003143int
Chris Wilson919926a2010-11-12 13:42:53 +00003144i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholte47c68e2008-11-14 13:35:19 -08003145{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003146 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08003147 int ret;
3148
Chris Wilson8d7e3de2011-02-07 15:23:02 +00003149 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3150 return 0;
3151
Chris Wilson88241782011-01-07 17:09:48 +00003152 ret = i915_gem_object_flush_gpu_write_domain(obj);
3153 if (ret)
3154 return ret;
3155
Chris Wilsonf8413192012-04-10 11:52:50 +01003156 if (write || obj->pending_gpu_write) {
3157 ret = i915_gem_object_wait_rendering(obj);
3158 if (ret)
3159 return ret;
3160 }
Eric Anholte47c68e2008-11-14 13:35:19 -08003161
3162 i915_gem_object_flush_gtt_write_domain(obj);
3163
Chris Wilson05394f32010-11-08 19:18:58 +00003164 old_write_domain = obj->base.write_domain;
3165 old_read_domains = obj->base.read_domains;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003166
Eric Anholte47c68e2008-11-14 13:35:19 -08003167 /* Flush the CPU cache if it's still invalid. */
Chris Wilson05394f32010-11-08 19:18:58 +00003168 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
Eric Anholte47c68e2008-11-14 13:35:19 -08003169 i915_gem_clflush_object(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08003170
Chris Wilson05394f32010-11-08 19:18:58 +00003171 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003172 }
3173
3174 /* It should now be out of any other write domains, and we can update
3175 * the domain values for our changes.
3176 */
Chris Wilson05394f32010-11-08 19:18:58 +00003177 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
Eric Anholte47c68e2008-11-14 13:35:19 -08003178
3179 /* If we're writing through the CPU, then the GPU read domains will
3180 * need to be invalidated at next use.
3181 */
3182 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00003183 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3184 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08003185 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003186
Chris Wilson1c5d22f2009-08-25 11:15:50 +01003187 trace_i915_gem_object_change_domain(obj,
3188 old_read_domains,
3189 old_write_domain);
3190
Eric Anholt2ef7eea2008-11-10 10:53:25 -08003191 return 0;
3192}
3193
Eric Anholt673a3942008-07-30 12:06:12 -07003194/* Throttle our rendering by waiting until the ring has completed our requests
3195 * emitted over 20 msec ago.
3196 *
Eric Anholtb9624422009-06-03 07:27:35 +00003197 * Note that if we were to use the current jiffies each time around the loop,
3198 * we wouldn't escape the function with any frames outstanding if the time to
3199 * render a frame was over 20ms.
3200 *
Eric Anholt673a3942008-07-30 12:06:12 -07003201 * This should get us reasonable parallelism between CPU and GPU but also
3202 * relatively low latency when blocking on a particular request to finish.
3203 */
3204static int
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003205i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003206{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003207 struct drm_i915_private *dev_priv = dev->dev_private;
3208 struct drm_i915_file_private *file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00003209 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003210 struct drm_i915_gem_request *request;
3211 struct intel_ring_buffer *ring = NULL;
3212 u32 seqno = 0;
3213 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003214
Chris Wilsone110e8d2011-01-26 15:39:14 +00003215 if (atomic_read(&dev_priv->mm.wedged))
3216 return -EIO;
3217
Chris Wilson1c255952010-09-26 11:03:27 +01003218 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003219 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
Eric Anholtb9624422009-06-03 07:27:35 +00003220 if (time_after_eq(request->emitted_jiffies, recent_enough))
3221 break;
3222
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003223 ring = request->ring;
3224 seqno = request->seqno;
Eric Anholtb9624422009-06-03 07:27:35 +00003225 }
Chris Wilson1c255952010-09-26 11:03:27 +01003226 spin_unlock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003227
3228 if (seqno == 0)
3229 return 0;
3230
Ben Widawsky5c81fe852012-05-24 15:03:08 -07003231 ret = __wait_seqno(ring, seqno, true, NULL);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003232 if (ret == 0)
3233 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
Eric Anholtb9624422009-06-03 07:27:35 +00003234
Eric Anholt673a3942008-07-30 12:06:12 -07003235 return ret;
3236}
3237
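/*
 * Pinning keeps an object resident in the GTT.  If the object is already
 * bound but at an offset that violates the requested alignment or
 * mappability, it is unbound and rebound first; otherwise it is only bound
 * when it has no GTT space yet.  Each successful pin bumps pin_count and
 * must be balanced by i915_gem_object_unpin().
 */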
Eric Anholt673a3942008-07-30 12:06:12 -07003238int
Chris Wilson05394f32010-11-08 19:18:58 +00003239i915_gem_object_pin(struct drm_i915_gem_object *obj,
3240 uint32_t alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01003241 bool map_and_fenceable)
Eric Anholt673a3942008-07-30 12:06:12 -07003242{
Eric Anholt673a3942008-07-30 12:06:12 -07003243 int ret;
3244
Chris Wilson05394f32010-11-08 19:18:58 +00003245 BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
Chris Wilsonac0c6b52010-05-27 13:18:18 +01003246
Chris Wilson05394f32010-11-08 19:18:58 +00003247 if (obj->gtt_space != NULL) {
3248 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
3249 (map_and_fenceable && !obj->map_and_fenceable)) {
3250 WARN(obj->pin_count,
Chris Wilsonae7d49d2010-08-04 12:37:41 +01003251 "bo is already pinned with incorrect alignment:"
Daniel Vetter75e9e912010-11-04 17:11:09 +01003252 " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
3253 " obj->map_and_fenceable=%d\n",
Chris Wilson05394f32010-11-08 19:18:58 +00003254 obj->gtt_offset, alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01003255 map_and_fenceable,
Chris Wilson05394f32010-11-08 19:18:58 +00003256 obj->map_and_fenceable);
Chris Wilsonac0c6b52010-05-27 13:18:18 +01003257 ret = i915_gem_object_unbind(obj);
3258 if (ret)
3259 return ret;
3260 }
3261 }
3262
Chris Wilson05394f32010-11-08 19:18:58 +00003263 if (obj->gtt_space == NULL) {
Chris Wilsona00b10c2010-09-24 21:15:47 +01003264 ret = i915_gem_object_bind_to_gtt(obj, alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01003265 map_and_fenceable);
Chris Wilson97311292009-09-21 00:22:34 +01003266 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07003267 return ret;
Chris Wilson22c344e2009-02-11 14:26:45 +00003268 }
Jesse Barnes76446ca2009-12-17 22:05:42 -05003269
Daniel Vetter74898d72012-02-15 23:50:22 +01003270 if (!obj->has_global_gtt_mapping && map_and_fenceable)
3271 i915_gem_gtt_bind_object(obj, obj->cache_level);
3272
Chris Wilson1b502472012-04-24 15:47:30 +01003273 obj->pin_count++;
Chris Wilson6299f992010-11-24 12:23:44 +00003274 obj->pin_mappable |= map_and_fenceable;
Eric Anholt673a3942008-07-30 12:06:12 -07003275
3276 return 0;
3277}
3278
3279void
Chris Wilson05394f32010-11-08 19:18:58 +00003280i915_gem_object_unpin(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07003281{
Chris Wilson05394f32010-11-08 19:18:58 +00003282 BUG_ON(obj->pin_count == 0);
3283 BUG_ON(obj->gtt_space == NULL);
Eric Anholt673a3942008-07-30 12:06:12 -07003284
Chris Wilson1b502472012-04-24 15:47:30 +01003285 if (--obj->pin_count == 0)
Chris Wilson6299f992010-11-24 12:23:44 +00003286 obj->pin_mappable = false;
Eric Anholt673a3942008-07-30 12:06:12 -07003287}
3288
3289int
3290i915_gem_pin_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003291 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003292{
3293 struct drm_i915_gem_pin *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003294 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07003295 int ret;
3296
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003297 ret = i915_mutex_lock_interruptible(dev);
3298 if (ret)
3299 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003300
Chris Wilson05394f32010-11-08 19:18:58 +00003301 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003302 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003303 ret = -ENOENT;
3304 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003305 }
Eric Anholt673a3942008-07-30 12:06:12 -07003306
Chris Wilson05394f32010-11-08 19:18:58 +00003307 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonbb6baf72009-09-22 14:24:13 +01003308 DRM_ERROR("Attempting to pin a purgeable buffer\n");
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003309 ret = -EINVAL;
3310 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003311 }
3312
Chris Wilson05394f32010-11-08 19:18:58 +00003313 if (obj->pin_filp != NULL && obj->pin_filp != file) {
Jesse Barnes79e53942008-11-07 14:24:08 -08003314 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3315 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003316 ret = -EINVAL;
3317 goto out;
Jesse Barnes79e53942008-11-07 14:24:08 -08003318 }
3319
Chris Wilson05394f32010-11-08 19:18:58 +00003320 obj->user_pin_count++;
3321 obj->pin_filp = file;
3322 if (obj->user_pin_count == 1) {
Daniel Vetter75e9e912010-11-04 17:11:09 +01003323 ret = i915_gem_object_pin(obj, args->alignment, true);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003324 if (ret)
3325 goto out;
Eric Anholt673a3942008-07-30 12:06:12 -07003326 }
3327
3328 /* XXX - flush the CPU caches for pinned objects
3329 * as the X server doesn't manage domains yet
3330 */
Eric Anholte47c68e2008-11-14 13:35:19 -08003331 i915_gem_object_flush_cpu_write_domain(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00003332 args->offset = obj->gtt_offset;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003333out:
Chris Wilson05394f32010-11-08 19:18:58 +00003334 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003335unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003336 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003337 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003338}
3339
3340int
3341i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003342 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003343{
3344 struct drm_i915_gem_pin *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003345 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01003346 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003347
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003348 ret = i915_mutex_lock_interruptible(dev);
3349 if (ret)
3350 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003351
Chris Wilson05394f32010-11-08 19:18:58 +00003352 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003353 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003354 ret = -ENOENT;
3355 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003356 }
Chris Wilson76c1dec2010-09-25 11:22:51 +01003357
Chris Wilson05394f32010-11-08 19:18:58 +00003358 if (obj->pin_filp != file) {
Jesse Barnes79e53942008-11-07 14:24:08 -08003359		DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
3360 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003361 ret = -EINVAL;
3362 goto out;
Jesse Barnes79e53942008-11-07 14:24:08 -08003363 }
Chris Wilson05394f32010-11-08 19:18:58 +00003364 obj->user_pin_count--;
3365 if (obj->user_pin_count == 0) {
3366 obj->pin_filp = NULL;
Jesse Barnes79e53942008-11-07 14:24:08 -08003367 i915_gem_object_unpin(obj);
3368 }
Eric Anholt673a3942008-07-30 12:06:12 -07003369
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003370out:
Chris Wilson05394f32010-11-08 19:18:58 +00003371 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003372unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003373 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003374 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003375}
3376
3377int
3378i915_gem_busy_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003379 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003380{
3381 struct drm_i915_gem_busy *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003382 struct drm_i915_gem_object *obj;
Chris Wilson30dbf0c2010-09-25 10:19:17 +01003383 int ret;
3384
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003385 ret = i915_mutex_lock_interruptible(dev);
3386 if (ret)
3387 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003388
Chris Wilson05394f32010-11-08 19:18:58 +00003389 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003390 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003391 ret = -ENOENT;
3392 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003393 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08003394
Chris Wilson0be555b2010-08-04 15:36:30 +01003395 /* Count all active objects as busy, even if they are currently not used
3396 * by the gpu. Users of this interface expect objects to eventually
3397 * become non-busy without any further actions, therefore emit any
3398 * necessary flushes here.
Eric Anholtc4de0a52008-12-14 19:05:04 -08003399 */
Daniel Vetter30dfebf2012-06-01 15:21:23 +02003400 ret = i915_gem_object_flush_active(obj);
3401
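	/* The low bit of args->busy reports whether the object is still
	 * active; if so, the ring it is active on is reported in the bits
	 * above 16 (intel_ring_flag() shifted by 16).
	 */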
Chris Wilson05394f32010-11-08 19:18:58 +00003402 args->busy = obj->active;
Chris Wilsone9808ed2012-07-04 12:25:08 +01003403 if (obj->ring) {
3404 BUILD_BUG_ON(I915_NUM_RINGS > 16);
3405 args->busy |= intel_ring_flag(obj->ring) << 16;
3406 }
Eric Anholt673a3942008-07-30 12:06:12 -07003407
Chris Wilson05394f32010-11-08 19:18:58 +00003408 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003409unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003410 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003411 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003412}
3413
3414int
3415i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3416 struct drm_file *file_priv)
3417{
Akshay Joshi0206e352011-08-16 15:34:10 -04003418 return i915_gem_ring_throttle(dev, file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07003419}
3420
Chris Wilson3ef94da2009-09-14 16:50:29 +01003421int
3422i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3423 struct drm_file *file_priv)
3424{
3425 struct drm_i915_gem_madvise *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003426 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01003427 int ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003428
3429 switch (args->madv) {
3430 case I915_MADV_DONTNEED:
3431 case I915_MADV_WILLNEED:
3432 break;
3433 default:
3434 return -EINVAL;
3435 }
3436
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003437 ret = i915_mutex_lock_interruptible(dev);
3438 if (ret)
3439 return ret;
3440
Chris Wilson05394f32010-11-08 19:18:58 +00003441 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003442 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003443 ret = -ENOENT;
3444 goto unlock;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003445 }
Chris Wilson3ef94da2009-09-14 16:50:29 +01003446
Chris Wilson05394f32010-11-08 19:18:58 +00003447 if (obj->pin_count) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003448 ret = -EINVAL;
3449 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003450 }
3451
Chris Wilson05394f32010-11-08 19:18:58 +00003452 if (obj->madv != __I915_MADV_PURGED)
3453 obj->madv = args->madv;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003454
Chris Wilson2d7ef392009-09-20 23:13:10 +01003455 /* if the object is no longer bound, discard its backing storage */
Chris Wilson05394f32010-11-08 19:18:58 +00003456 if (i915_gem_object_is_purgeable(obj) &&
3457 obj->gtt_space == NULL)
Chris Wilson2d7ef392009-09-20 23:13:10 +01003458 i915_gem_object_truncate(obj);
3459
Chris Wilson05394f32010-11-08 19:18:58 +00003460 args->retained = obj->madv != __I915_MADV_PURGED;
Chris Wilsonbb6baf72009-09-22 14:24:13 +01003461
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003462out:
Chris Wilson05394f32010-11-08 19:18:58 +00003463 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003464unlock:
Chris Wilson3ef94da2009-09-14 16:50:29 +01003465 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003466 return ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003467}
3468
Chris Wilson05394f32010-11-08 19:18:58 +00003469struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3470 size_t size)
Daniel Vetterac52bc52010-04-09 19:05:06 +00003471{
Chris Wilson73aa8082010-09-30 11:46:12 +01003472 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetterc397b902010-04-09 19:05:07 +00003473 struct drm_i915_gem_object *obj;
Hugh Dickins5949eac2011-06-27 16:18:18 -07003474 struct address_space *mapping;
Chris Wilsonbed1ea92012-05-24 20:48:12 +01003475 u32 mask;
Daniel Vetterc397b902010-04-09 19:05:07 +00003476
3477 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
3478 if (obj == NULL)
3479 return NULL;
3480
3481 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
3482 kfree(obj);
3483 return NULL;
3484 }
3485
Chris Wilsonbed1ea92012-05-24 20:48:12 +01003486 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
3487 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
3488 /* 965gm cannot relocate objects above 4GiB. */
3489 mask &= ~__GFP_HIGHMEM;
3490 mask |= __GFP_DMA32;
3491 }
3492
Hugh Dickins5949eac2011-06-27 16:18:18 -07003493 mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
Chris Wilsonbed1ea92012-05-24 20:48:12 +01003494 mapping_set_gfp_mask(mapping, mask);
Hugh Dickins5949eac2011-06-27 16:18:18 -07003495
Chris Wilson73aa8082010-09-30 11:46:12 +01003496 i915_gem_info_add_obj(dev_priv, size);
3497
Daniel Vetterc397b902010-04-09 19:05:07 +00003498 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3499 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3500
Eugeni Dodonov3d29b842012-01-17 14:43:53 -02003501 if (HAS_LLC(dev)) {
3502 /* On some devices, we can have the GPU use the LLC (the CPU
Eric Anholta1871112011-03-29 16:59:55 -07003503 * cache) for about a 10% performance improvement
3504 * compared to uncached. Graphics requests other than
3505 * display scanout are coherent with the CPU in
3506 * accessing this cache. This means in this mode we
3507 * don't need to clflush on the CPU side, and on the
3508 * GPU side we only need to flush internal caches to
3509 * get data visible to the CPU.
3510 *
3511 * However, we maintain the display planes as UC, and so
3512 * need to rebind when first used as such.
3513 */
3514 obj->cache_level = I915_CACHE_LLC;
3515 } else
3516 obj->cache_level = I915_CACHE_NONE;
3517
Daniel Vetter62b8b212010-04-09 19:05:08 +00003518 obj->base.driver_private = NULL;
Daniel Vetterc397b902010-04-09 19:05:07 +00003519 obj->fence_reg = I915_FENCE_REG_NONE;
Chris Wilson69dc4982010-10-19 10:36:51 +01003520 INIT_LIST_HEAD(&obj->mm_list);
Daniel Vetter93a37f22010-11-05 20:24:53 +01003521 INIT_LIST_HEAD(&obj->gtt_list);
Chris Wilson69dc4982010-10-19 10:36:51 +01003522 INIT_LIST_HEAD(&obj->ring_list);
Chris Wilson432e58e2010-11-25 19:32:06 +00003523 INIT_LIST_HEAD(&obj->exec_list);
Daniel Vetterc397b902010-04-09 19:05:07 +00003524 INIT_LIST_HEAD(&obj->gpu_write_list);
Daniel Vetterc397b902010-04-09 19:05:07 +00003525 obj->madv = I915_MADV_WILLNEED;
Daniel Vetter75e9e912010-11-04 17:11:09 +01003526 /* Avoid an unnecessary call to unbind on the first bind. */
3527 obj->map_and_fenceable = true;
Daniel Vetterc397b902010-04-09 19:05:07 +00003528
Chris Wilson05394f32010-11-08 19:18:58 +00003529 return obj;
Daniel Vetterac52bc52010-04-09 19:05:06 +00003530}
3531
Eric Anholt673a3942008-07-30 12:06:12 -07003532int i915_gem_init_object(struct drm_gem_object *obj)
3533{
Daniel Vetterc397b902010-04-09 19:05:07 +00003534 BUG();
Jesse Barnesde151cf2008-11-12 10:03:55 -08003535
Eric Anholt673a3942008-07-30 12:06:12 -07003536 return 0;
3537}
3538
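/*
 * Final teardown of a GEM object: drop any dma-buf import, detach phys
 * backing, force an unbind (flagging the driver non-interruptible so the
 * unbind cannot be aborted by a signal), release the mmap offset and
 * backing storage, and free the per-object bookkeeping.
 */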
Chris Wilson1488fc02012-04-24 15:47:31 +01003539void i915_gem_free_object(struct drm_gem_object *gem_obj)
Chris Wilsonbe726152010-07-23 23:18:50 +01003540{
Chris Wilson1488fc02012-04-24 15:47:31 +01003541 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
Chris Wilson05394f32010-11-08 19:18:58 +00003542 struct drm_device *dev = obj->base.dev;
Chris Wilsonbe726152010-07-23 23:18:50 +01003543 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonbe726152010-07-23 23:18:50 +01003544
Chris Wilson26e12f82011-03-20 11:20:19 +00003545 trace_i915_gem_object_destroy(obj);
3546
Daniel Vetter1286ff72012-05-10 15:25:09 +02003547 if (gem_obj->import_attach)
3548 drm_prime_gem_destroy(gem_obj, obj->sg_table);
3549
Chris Wilson1488fc02012-04-24 15:47:31 +01003550 if (obj->phys_obj)
3551 i915_gem_detach_phys_object(dev, obj);
3552
3553 obj->pin_count = 0;
3554 if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
3555 bool was_interruptible;
3556
3557 was_interruptible = dev_priv->mm.interruptible;
3558 dev_priv->mm.interruptible = false;
3559
3560 WARN_ON(i915_gem_object_unbind(obj));
3561
3562 dev_priv->mm.interruptible = was_interruptible;
3563 }
3564
Chris Wilson05394f32010-11-08 19:18:58 +00003565 if (obj->base.map_list.map)
Rob Clarkb464e9a2011-08-10 08:09:08 -05003566 drm_gem_free_mmap_offset(&obj->base);
Chris Wilsonbe726152010-07-23 23:18:50 +01003567
Chris Wilson05394f32010-11-08 19:18:58 +00003568 drm_gem_object_release(&obj->base);
3569 i915_gem_info_remove_obj(dev_priv, obj->base.size);
Chris Wilsonbe726152010-07-23 23:18:50 +01003570
Chris Wilson05394f32010-11-08 19:18:58 +00003571 kfree(obj->bit_17);
3572 kfree(obj);
Chris Wilsonbe726152010-07-23 23:18:50 +01003573}
3574
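/*
 * Quiesce the GPU for suspend and VT switch: wait for outstanding rendering,
 * retire requests, evict everything under UMS, drop the fence registers and
 * tear down the rings.  The leavevt ioctl and lastclose paths further down
 * in this file call into this.
 */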
Jesse Barnes5669fca2009-02-17 15:13:31 -08003575int
Eric Anholt673a3942008-07-30 12:06:12 -07003576i915_gem_idle(struct drm_device *dev)
3577{
3578 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson29105cc2010-01-07 10:39:13 +00003579 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003580
Keith Packard6dbe2772008-10-14 21:41:13 -07003581 mutex_lock(&dev->struct_mutex);
3582
Chris Wilson87acb0a2010-10-19 10:13:00 +01003583 if (dev_priv->mm.suspended) {
Keith Packard6dbe2772008-10-14 21:41:13 -07003584 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07003585 return 0;
Keith Packard6dbe2772008-10-14 21:41:13 -07003586 }
Eric Anholt673a3942008-07-30 12:06:12 -07003587
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07003588 ret = i915_gpu_idle(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07003589 if (ret) {
3590 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07003591 return ret;
Keith Packard6dbe2772008-10-14 21:41:13 -07003592 }
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07003593 i915_gem_retire_requests(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07003594
Chris Wilson29105cc2010-01-07 10:39:13 +00003595 /* Under UMS, be paranoid and evict. */
Chris Wilsona39d7ef2012-04-24 18:22:52 +01003596 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3597 i915_gem_evict_everything(dev, false);
Chris Wilson29105cc2010-01-07 10:39:13 +00003598
Chris Wilson312817a2010-11-22 11:50:11 +00003599 i915_gem_reset_fences(dev);
3600
Chris Wilson29105cc2010-01-07 10:39:13 +00003601 /* Hack! Don't let anybody do execbuf while we don't control the chip.
3602 * We need to replace this with a semaphore, or something.
3603 * And not confound mm.suspended!
3604 */
3605 dev_priv->mm.suspended = 1;
Daniel Vetterbc0c7f12010-08-20 18:18:48 +02003606 del_timer_sync(&dev_priv->hangcheck_timer);
Chris Wilson29105cc2010-01-07 10:39:13 +00003607
3608 i915_kernel_lost_context(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07003609 i915_gem_cleanup_ringbuffer(dev);
Chris Wilson29105cc2010-01-07 10:39:13 +00003610
Keith Packard6dbe2772008-10-14 21:41:13 -07003611 mutex_unlock(&dev->struct_mutex);
3612
Chris Wilson29105cc2010-01-07 10:39:13 +00003613 /* Cancel the retire work handler, which should be idle now. */
3614 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3615
Eric Anholt673a3942008-07-30 12:06:12 -07003616 return 0;
3617}
3618
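/*
 * Rewrite the GEN7_L3LOG remapping registers from the values saved in
 * mm.l3_remap_info, one dword at a time.  DOP clock gating is disabled
 * around the writes (and the MMIO posted) so that they actually land
 * before gating is restored.
 */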
Ben Widawskyb9524a12012-05-25 16:56:24 -07003619void i915_gem_l3_remap(struct drm_device *dev)
3620{
3621 drm_i915_private_t *dev_priv = dev->dev_private;
3622 u32 misccpctl;
3623 int i;
3624
3625 if (!IS_IVYBRIDGE(dev))
3626 return;
3627
3628 if (!dev_priv->mm.l3_remap_info)
3629 return;
3630
3631 misccpctl = I915_READ(GEN7_MISCCPCTL);
3632 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
3633 POSTING_READ(GEN7_MISCCPCTL);
3634
3635 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
3636 u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
3637 if (remap && remap != dev_priv->mm.l3_remap_info[i/4])
3638 DRM_DEBUG("0x%x was already programmed to %x\n",
3639 GEN7_L3LOG_BASE + i, remap);
3640 if (remap && !dev_priv->mm.l3_remap_info[i/4])
3641 DRM_DEBUG_DRIVER("Clearing remapped register\n");
3642 I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->mm.l3_remap_info[i/4]);
3643 }
3644
 3645	/* Make sure all the writes land before re-enabling DOP clock gating */
3646 POSTING_READ(GEN7_L3LOG_BASE);
3647
3648 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
3649}
3650
Daniel Vetterf691e2f2012-02-02 09:58:12 +01003651void i915_gem_init_swizzling(struct drm_device *dev)
3652{
3653 drm_i915_private_t *dev_priv = dev->dev_private;
3654
Daniel Vetter11782b02012-01-31 16:47:55 +01003655 if (INTEL_INFO(dev)->gen < 5 ||
Daniel Vetterf691e2f2012-02-02 09:58:12 +01003656 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
3657 return;
3658
3659 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
3660 DISP_TILE_SURFACE_SWIZZLING);
3661
Daniel Vetter11782b02012-01-31 16:47:55 +01003662 if (IS_GEN5(dev))
3663 return;
3664
Daniel Vetterf691e2f2012-02-02 09:58:12 +01003665 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
3666 if (IS_GEN6(dev))
Daniel Vetter6b26c862012-04-24 14:04:12 +02003667 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
Daniel Vetterf691e2f2012-02-02 09:58:12 +01003668 else
Daniel Vetter6b26c862012-04-24 14:04:12 +02003669 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
Daniel Vetterf691e2f2012-02-02 09:58:12 +01003670}
Daniel Vettere21af882012-02-09 20:53:27 +01003671
3672void i915_gem_init_ppgtt(struct drm_device *dev)
3673{
3674 drm_i915_private_t *dev_priv = dev->dev_private;
3675 uint32_t pd_offset;
3676 struct intel_ring_buffer *ring;
Daniel Vetter55a254a2012-03-22 00:14:43 +01003677 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
3678 uint32_t __iomem *pd_addr;
3679 uint32_t pd_entry;
Daniel Vettere21af882012-02-09 20:53:27 +01003680 int i;
3681
3682 if (!dev_priv->mm.aliasing_ppgtt)
3683 return;
3684
Daniel Vetter55a254a2012-03-22 00:14:43 +01003685
3686 pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
3687 for (i = 0; i < ppgtt->num_pd_entries; i++) {
3688 dma_addr_t pt_addr;
3689
3690 if (dev_priv->mm.gtt->needs_dmar)
3691 pt_addr = ppgtt->pt_dma_addr[i];
3692 else
3693 pt_addr = page_to_phys(ppgtt->pt_pages[i]);
3694
3695 pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
3696 pd_entry |= GEN6_PDE_VALID;
3697
3698 writel(pd_entry, pd_addr + i);
3699 }
3700 readl(pd_addr);
3701
3702 pd_offset = ppgtt->pd_offset;
Daniel Vettere21af882012-02-09 20:53:27 +01003703 pd_offset /= 64; /* in cachelines, */
3704 pd_offset <<= 16;
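	/*
	 * pd_offset is now expressed in 64-byte cachelines and placed in
	 * bits 31:16, the format RING_PP_DIR_BASE expects below.  As a
	 * worked example, a hypothetical page directory at GTT offset
	 * 0x40000 becomes (0x40000 / 64) << 16 == 0x10000000.
	 */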
3705
3706 if (INTEL_INFO(dev)->gen == 6) {
Daniel Vetter48ecfa12012-04-11 20:42:40 +02003707 uint32_t ecochk, gab_ctl, ecobits;
3708
3709 ecobits = I915_READ(GAC_ECO_BITS);
3710 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
Daniel Vetterbe901a52012-04-11 20:42:39 +02003711
3712 gab_ctl = I915_READ(GAB_CTL);
3713 I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
3714
3715 ecochk = I915_READ(GAM_ECOCHK);
Daniel Vettere21af882012-02-09 20:53:27 +01003716 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
3717 ECOCHK_PPGTT_CACHE64B);
Daniel Vetter6b26c862012-04-24 14:04:12 +02003718 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
Daniel Vettere21af882012-02-09 20:53:27 +01003719 } else if (INTEL_INFO(dev)->gen >= 7) {
3720 I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
3721 /* GFX_MODE is per-ring on gen7+ */
3722 }
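	/*
	 * On gen6 the PPGTT enable bit lives in the global GFX_MODE register
	 * written above; on gen7 the equivalent bit is per-ring, so it is
	 * set through RING_MODE_GEN7() in the loop below.
	 */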
3723
Chris Wilsonb4519512012-05-11 14:29:30 +01003724 for_each_ring(ring, dev_priv, i) {
Daniel Vettere21af882012-02-09 20:53:27 +01003725 if (INTEL_INFO(dev)->gen >= 7)
3726 I915_WRITE(RING_MODE_GEN7(ring),
Daniel Vetter6b26c862012-04-24 14:04:12 +02003727 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
Daniel Vettere21af882012-02-09 20:53:27 +01003728
3729 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
3730 I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
3731 }
3732}
3733
Chris Wilson67b1b572012-07-05 23:49:40 +01003734static bool
3735intel_enable_blt(struct drm_device *dev)
3736{
3737 if (!HAS_BLT(dev))
3738 return false;
3739
3740 /* The blitter was dysfunctional on early prototypes */
3741 if (IS_GEN6(dev) && dev->pdev->revision < 8) {
3742 DRM_INFO("BLT not supported on this pre-production hardware;"
3743 " graphics performance will be degraded.\n");
3744 return false;
3745 }
3746
3747 return true;
3748}
3749
Eric Anholt673a3942008-07-30 12:06:12 -07003750int
Daniel Vetterf691e2f2012-02-02 09:58:12 +01003751i915_gem_init_hw(struct drm_device *dev)
Zou Nan hai8187a2b2010-05-21 09:08:55 +08003752{
3753 drm_i915_private_t *dev_priv = dev->dev_private;
3754 int ret;
Chris Wilson68f95ba2010-05-27 13:18:22 +01003755
Daniel Vetter8ecd1a62012-06-07 15:56:03 +02003756 if (!intel_enable_gtt())
3757 return -EIO;
3758
Ben Widawskyb9524a12012-05-25 16:56:24 -07003759 i915_gem_l3_remap(dev);
3760
Daniel Vetterf691e2f2012-02-02 09:58:12 +01003761 i915_gem_init_swizzling(dev);
3762
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08003763 ret = intel_init_render_ring_buffer(dev);
Chris Wilson68f95ba2010-05-27 13:18:22 +01003764 if (ret)
Chris Wilsonb6913e42010-11-12 10:46:37 +00003765 return ret;
Chris Wilson68f95ba2010-05-27 13:18:22 +01003766
3767 if (HAS_BSD(dev)) {
Xiang, Haihao5c1143b2010-09-16 10:43:11 +08003768 ret = intel_init_bsd_ring_buffer(dev);
Chris Wilson68f95ba2010-05-27 13:18:22 +01003769 if (ret)
3770 goto cleanup_render_ring;
Zou Nan haid1b851f2010-05-21 09:08:57 +08003771 }
Chris Wilson68f95ba2010-05-27 13:18:22 +01003772
Chris Wilson67b1b572012-07-05 23:49:40 +01003773 if (intel_enable_blt(dev)) {
Chris Wilson549f7362010-10-19 11:19:32 +01003774 ret = intel_init_blt_ring_buffer(dev);
3775 if (ret)
3776 goto cleanup_bsd_ring;
3777 }
3778
Chris Wilson6f392d52010-08-07 11:01:22 +01003779 dev_priv->next_seqno = 1;
3780
Ben Widawsky254f9652012-06-04 14:42:42 -07003781 /*
 3782	 * XXX: A workaround described elsewhere suggests loading
 3783	 * contexts before enabling PPGTT.
3784 */
3785 i915_gem_context_init(dev);
Daniel Vettere21af882012-02-09 20:53:27 +01003786 i915_gem_init_ppgtt(dev);
3787
Chris Wilson68f95ba2010-05-27 13:18:22 +01003788 return 0;
3789
Chris Wilson549f7362010-10-19 11:19:32 +01003790cleanup_bsd_ring:
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003791 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
Chris Wilson68f95ba2010-05-27 13:18:22 +01003792cleanup_render_ring:
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003793 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
Zou Nan hai8187a2b2010-05-21 09:08:55 +08003794 return ret;
3795}
3796
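/*
 * A non-negative i915_enable_ppgtt value forces PPGTT on or off; a negative
 * value means auto-detect, which currently only rules out gen6 (SNB)
 * systems with VT-d active.
 */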
Chris Wilson1070a422012-04-24 15:47:41 +01003797static bool
3798intel_enable_ppgtt(struct drm_device *dev)
3799{
3800 if (i915_enable_ppgtt >= 0)
3801 return i915_enable_ppgtt;
3802
3803#ifdef CONFIG_INTEL_IOMMU
3804 /* Disable ppgtt on SNB if VT-d is on. */
3805 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
3806 return false;
3807#endif
3808
3809 return true;
3810}
3811
3812int i915_gem_init(struct drm_device *dev)
3813{
3814 struct drm_i915_private *dev_priv = dev->dev_private;
3815 unsigned long gtt_size, mappable_size;
3816 int ret;
3817
3818 gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
3819 mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
3820
3821 mutex_lock(&dev->struct_mutex);
3822 if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
3823 /* PPGTT pdes are stolen from global gtt ptes, so shrink the
3824 * aperture accordingly when using aliasing ppgtt. */
3825 gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
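		/*
		 * For example, with 512 page-directory entries and 4KiB
		 * pages (the usual gen6 aliasing-PPGTT layout), this
		 * reserves 512 * 4KiB = 2MiB at the end of the global GTT
		 * for the page directory.
		 */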
3826
3827 i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);
3828
3829 ret = i915_gem_init_aliasing_ppgtt(dev);
3830 if (ret) {
3831 mutex_unlock(&dev->struct_mutex);
3832 return ret;
3833 }
3834 } else {
 3835		/* Let GEM manage all of the aperture.
3836 *
3837 * However, leave one page at the end still bound to the scratch
3838 * page. There are a number of places where the hardware
3839 * apparently prefetches past the end of the object, and we've
3840 * seen multiple hangs with the GPU head pointer stuck in a
3841 * batchbuffer bound at the last page of the aperture. One page
3842 * should be enough to keep any prefetching inside of the
3843 * aperture.
3844 */
3845 i915_gem_init_global_gtt(dev, 0, mappable_size,
3846 gtt_size);
3847 }
3848
3849 ret = i915_gem_init_hw(dev);
3850 mutex_unlock(&dev->struct_mutex);
3851 if (ret) {
3852 i915_gem_cleanup_aliasing_ppgtt(dev);
3853 return ret;
3854 }
3855
Daniel Vetter53ca26c2012-04-26 23:28:03 +02003856 /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
3857 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3858 dev_priv->dri1.allow_batchbuffer = 1;
Chris Wilson1070a422012-04-24 15:47:41 +01003859 return 0;
3860}
3861
Zou Nan hai8187a2b2010-05-21 09:08:55 +08003862void
3863i915_gem_cleanup_ringbuffer(struct drm_device *dev)
3864{
3865 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01003866 struct intel_ring_buffer *ring;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003867 int i;
Zou Nan hai8187a2b2010-05-21 09:08:55 +08003868
Chris Wilsonb4519512012-05-11 14:29:30 +01003869 for_each_ring(ring, dev_priv, i)
3870 intel_cleanup_ring_buffer(ring);
Zou Nan hai8187a2b2010-05-21 09:08:55 +08003871}
3872
3873int
Eric Anholt673a3942008-07-30 12:06:12 -07003874i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
3875 struct drm_file *file_priv)
3876{
3877 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonb4519512012-05-11 14:29:30 +01003878 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003879
Jesse Barnes79e53942008-11-07 14:24:08 -08003880 if (drm_core_check_feature(dev, DRIVER_MODESET))
3881 return 0;
3882
Ben Gamariba1234d2009-09-14 17:48:47 -04003883 if (atomic_read(&dev_priv->mm.wedged)) {
Eric Anholt673a3942008-07-30 12:06:12 -07003884 DRM_ERROR("Reenabling wedged hardware, good luck\n");
Ben Gamariba1234d2009-09-14 17:48:47 -04003885 atomic_set(&dev_priv->mm.wedged, 0);
Eric Anholt673a3942008-07-30 12:06:12 -07003886 }
3887
Eric Anholt673a3942008-07-30 12:06:12 -07003888 mutex_lock(&dev->struct_mutex);
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08003889 dev_priv->mm.suspended = 0;
3890
Daniel Vetterf691e2f2012-02-02 09:58:12 +01003891 ret = i915_gem_init_hw(dev);
Wu Fengguangd816f6ac2009-04-18 10:43:32 +08003892 if (ret != 0) {
3893 mutex_unlock(&dev->struct_mutex);
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08003894 return ret;
Wu Fengguangd816f6ac2009-04-18 10:43:32 +08003895 }
Eric Anholt9bb2d6f2008-12-23 18:42:32 -08003896
Chris Wilson69dc4982010-10-19 10:36:51 +01003897 BUG_ON(!list_empty(&dev_priv->mm.active_list));
Eric Anholt673a3942008-07-30 12:06:12 -07003898 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
3899 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
Eric Anholt673a3942008-07-30 12:06:12 -07003900 mutex_unlock(&dev->struct_mutex);
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04003901
Chris Wilson5f353082010-06-07 14:03:03 +01003902 ret = drm_irq_install(dev);
3903 if (ret)
3904 goto cleanup_ringbuffer;
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04003905
Eric Anholt673a3942008-07-30 12:06:12 -07003906 return 0;
Chris Wilson5f353082010-06-07 14:03:03 +01003907
3908cleanup_ringbuffer:
3909 mutex_lock(&dev->struct_mutex);
3910 i915_gem_cleanup_ringbuffer(dev);
3911 dev_priv->mm.suspended = 1;
3912 mutex_unlock(&dev->struct_mutex);
3913
3914 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003915}
3916
3917int
3918i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
3919 struct drm_file *file_priv)
3920{
Jesse Barnes79e53942008-11-07 14:24:08 -08003921 if (drm_core_check_feature(dev, DRIVER_MODESET))
3922 return 0;
3923
Kristian Høgsbergdbb19d32008-08-20 11:04:27 -04003924 drm_irq_uninstall(dev);
Linus Torvaldse6890f62009-09-08 17:09:24 -07003925 return i915_gem_idle(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07003926}
3927
3928void
3929i915_gem_lastclose(struct drm_device *dev)
3930{
3931 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003932
Eric Anholte806b492009-01-22 09:56:58 -08003933 if (drm_core_check_feature(dev, DRIVER_MODESET))
3934 return;
3935
Keith Packard6dbe2772008-10-14 21:41:13 -07003936 ret = i915_gem_idle(dev);
3937 if (ret)
3938 DRM_ERROR("failed to idle hardware: %d\n", ret);
Eric Anholt673a3942008-07-30 12:06:12 -07003939}
3940
Chris Wilson64193402010-10-24 12:38:05 +01003941static void
3942init_ring_lists(struct intel_ring_buffer *ring)
3943{
3944 INIT_LIST_HEAD(&ring->active_list);
3945 INIT_LIST_HEAD(&ring->request_list);
3946 INIT_LIST_HEAD(&ring->gpu_write_list);
3947}
3948
Eric Anholt673a3942008-07-30 12:06:12 -07003949void
3950i915_gem_load(struct drm_device *dev)
3951{
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02003952 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07003953 drm_i915_private_t *dev_priv = dev->dev_private;
3954
Chris Wilson69dc4982010-10-19 10:36:51 +01003955 INIT_LIST_HEAD(&dev_priv->mm.active_list);
Eric Anholt673a3942008-07-30 12:06:12 -07003956 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
3957 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
Eric Anholta09ba7f2009-08-29 12:49:51 -07003958 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
Daniel Vetter93a37f22010-11-05 20:24:53 +01003959 INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003960 for (i = 0; i < I915_NUM_RINGS; i++)
3961 init_ring_lists(&dev_priv->ring[i]);
Daniel Vetter4b9de732011-10-09 21:52:02 +02003962 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
Daniel Vetter007cc8a2010-04-28 11:02:31 +02003963 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
Eric Anholt673a3942008-07-30 12:06:12 -07003964 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
3965 i915_gem_retire_work_handler);
Chris Wilson30dbf0c2010-09-25 10:19:17 +01003966 init_completion(&dev_priv->error_completion);
Chris Wilson31169712009-09-14 16:50:28 +01003967
Dave Airlie94400122010-07-20 13:15:31 +10003968 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
3969 if (IS_GEN3(dev)) {
Daniel Vetter50743292012-04-26 22:02:54 +02003970 I915_WRITE(MI_ARB_STATE,
3971 _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
Dave Airlie94400122010-07-20 13:15:31 +10003972 }
3973
Chris Wilson72bfa192010-12-19 11:42:05 +00003974 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
3975
Jesse Barnesde151cf2008-11-12 10:03:55 -08003976 /* Old X drivers will take 0-2 for front, back, depth buffers */
Eric Anholtb397c832010-01-26 09:43:10 -08003977 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3978 dev_priv->fence_reg_start = 3;
Jesse Barnesde151cf2008-11-12 10:03:55 -08003979
Chris Wilsona6c45cf2010-09-17 00:32:17 +01003980 if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
Jesse Barnesde151cf2008-11-12 10:03:55 -08003981 dev_priv->num_fence_regs = 16;
3982 else
3983 dev_priv->num_fence_regs = 8;
3984
Grégoire Henryb5aa8a02009-06-23 15:41:02 +02003985 /* Initialize fence registers to zero */
Chris Wilsonada726c2012-04-17 15:31:32 +01003986 i915_gem_reset_fences(dev);
Eric Anholt10ed13e2011-05-06 13:53:49 -07003987
Eric Anholt673a3942008-07-30 12:06:12 -07003988 i915_gem_detect_bit_6_swizzle(dev);
Kristian Høgsberg6b95a202009-11-18 11:25:18 -05003989 init_waitqueue_head(&dev_priv->pending_flip_queue);
Chris Wilson17250b72010-10-28 12:51:39 +01003990
Chris Wilsonce453d82011-02-21 14:43:56 +00003991 dev_priv->mm.interruptible = true;
3992
Chris Wilson17250b72010-10-28 12:51:39 +01003993 dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
3994 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
3995 register_shrinker(&dev_priv->mm.inactive_shrinker);
Eric Anholt673a3942008-07-30 12:06:12 -07003996}
Dave Airlie71acb5e2008-12-30 20:31:46 +10003997
3998/*
 3999 * Create a physically contiguous memory object to back an object,
 4000 * e.g. for cursor and overlay registers.
4001 */
Chris Wilson995b67622010-08-20 13:23:26 +01004002static int i915_gem_init_phys_object(struct drm_device *dev,
4003 int id, int size, int align)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004004{
4005 drm_i915_private_t *dev_priv = dev->dev_private;
4006 struct drm_i915_gem_phys_object *phys_obj;
4007 int ret;
4008
4009 if (dev_priv->mm.phys_objs[id - 1] || !size)
4010 return 0;
4011
Eric Anholt9a298b22009-03-24 12:23:04 -07004012 phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004013 if (!phys_obj)
4014 return -ENOMEM;
4015
4016 phys_obj->id = id;
4017
Chris Wilson6eeefaf2010-08-07 11:01:39 +01004018 phys_obj->handle = drm_pci_alloc(dev, size, align);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004019 if (!phys_obj->handle) {
4020 ret = -ENOMEM;
4021 goto kfree_obj;
4022 }
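	/*
	 * drm_pci_alloc() provides the physically contiguous buffer the
	 * comment above asks for; on x86 it is additionally marked
	 * write-combining so CPU updates bypass the cache on their way to
	 * the hardware.
	 */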
4023#ifdef CONFIG_X86
4024 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4025#endif
4026
4027 dev_priv->mm.phys_objs[id - 1] = phys_obj;
4028
4029 return 0;
4030kfree_obj:
Eric Anholt9a298b22009-03-24 12:23:04 -07004031 kfree(phys_obj);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004032 return ret;
4033}
4034
Chris Wilson995b67622010-08-20 13:23:26 +01004035static void i915_gem_free_phys_object(struct drm_device *dev, int id)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004036{
4037 drm_i915_private_t *dev_priv = dev->dev_private;
4038 struct drm_i915_gem_phys_object *phys_obj;
4039
4040 if (!dev_priv->mm.phys_objs[id - 1])
4041 return;
4042
4043 phys_obj = dev_priv->mm.phys_objs[id - 1];
4044 if (phys_obj->cur_obj) {
4045 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4046 }
4047
4048#ifdef CONFIG_X86
4049 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4050#endif
4051 drm_pci_free(dev, phys_obj->handle);
4052 kfree(phys_obj);
4053 dev_priv->mm.phys_objs[id - 1] = NULL;
4054}
4055
4056void i915_gem_free_all_phys_object(struct drm_device *dev)
4057{
4058 int i;
4059
Dave Airlie260883c2009-01-22 17:58:49 +10004060 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004061 i915_gem_free_phys_object(dev, i);
4062}
4063
4064void i915_gem_detach_phys_object(struct drm_device *dev,
Chris Wilson05394f32010-11-08 19:18:58 +00004065 struct drm_i915_gem_object *obj)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004066{
Chris Wilson05394f32010-11-08 19:18:58 +00004067 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
Chris Wilsone5281cc2010-10-28 13:45:36 +01004068 char *vaddr;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004069 int i;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004070 int page_count;
4071
Chris Wilson05394f32010-11-08 19:18:58 +00004072 if (!obj->phys_obj)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004073 return;
Chris Wilson05394f32010-11-08 19:18:58 +00004074 vaddr = obj->phys_obj->handle->vaddr;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004075
Chris Wilson05394f32010-11-08 19:18:58 +00004076 page_count = obj->base.size / PAGE_SIZE;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004077 for (i = 0; i < page_count; i++) {
Hugh Dickins5949eac2011-06-27 16:18:18 -07004078 struct page *page = shmem_read_mapping_page(mapping, i);
Chris Wilsone5281cc2010-10-28 13:45:36 +01004079 if (!IS_ERR(page)) {
4080 char *dst = kmap_atomic(page);
4081 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4082 kunmap_atomic(dst);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004083
Chris Wilsone5281cc2010-10-28 13:45:36 +01004084 drm_clflush_pages(&page, 1);
4085
4086 set_page_dirty(page);
4087 mark_page_accessed(page);
4088 page_cache_release(page);
4089 }
Dave Airlie71acb5e2008-12-30 20:31:46 +10004090 }
Daniel Vetter40ce6572010-11-05 18:12:18 +01004091 intel_gtt_chipset_flush();
Chris Wilsond78b47b2009-06-17 21:52:49 +01004092
Chris Wilson05394f32010-11-08 19:18:58 +00004093 obj->phys_obj->cur_obj = NULL;
4094 obj->phys_obj = NULL;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004095}
4096
4097int
4098i915_gem_attach_phys_object(struct drm_device *dev,
Chris Wilson05394f32010-11-08 19:18:58 +00004099 struct drm_i915_gem_object *obj,
Chris Wilson6eeefaf2010-08-07 11:01:39 +01004100 int id,
4101 int align)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004102{
Chris Wilson05394f32010-11-08 19:18:58 +00004103 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004104 drm_i915_private_t *dev_priv = dev->dev_private;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004105 int ret = 0;
4106 int page_count;
4107 int i;
4108
4109 if (id > I915_MAX_PHYS_OBJECT)
4110 return -EINVAL;
4111
Chris Wilson05394f32010-11-08 19:18:58 +00004112 if (obj->phys_obj) {
4113 if (obj->phys_obj->id == id)
Dave Airlie71acb5e2008-12-30 20:31:46 +10004114 return 0;
4115 i915_gem_detach_phys_object(dev, obj);
4116 }
4117
Dave Airlie71acb5e2008-12-30 20:31:46 +10004118 /* create a new object */
4119 if (!dev_priv->mm.phys_objs[id - 1]) {
4120 ret = i915_gem_init_phys_object(dev, id,
Chris Wilson05394f32010-11-08 19:18:58 +00004121 obj->base.size, align);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004122 if (ret) {
Chris Wilson05394f32010-11-08 19:18:58 +00004123 DRM_ERROR("failed to init phys object %d size: %zu\n",
4124 id, obj->base.size);
Chris Wilsone5281cc2010-10-28 13:45:36 +01004125 return ret;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004126 }
4127 }
4128
4129 /* bind to the object */
Chris Wilson05394f32010-11-08 19:18:58 +00004130 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4131 obj->phys_obj->cur_obj = obj;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004132
Chris Wilson05394f32010-11-08 19:18:58 +00004133 page_count = obj->base.size / PAGE_SIZE;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004134
4135 for (i = 0; i < page_count; i++) {
Chris Wilsone5281cc2010-10-28 13:45:36 +01004136 struct page *page;
4137 char *dst, *src;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004138
Hugh Dickins5949eac2011-06-27 16:18:18 -07004139 page = shmem_read_mapping_page(mapping, i);
Chris Wilsone5281cc2010-10-28 13:45:36 +01004140 if (IS_ERR(page))
4141 return PTR_ERR(page);
4142
Chris Wilsonff75b9b2010-10-30 22:52:31 +01004143 src = kmap_atomic(page);
Chris Wilson05394f32010-11-08 19:18:58 +00004144 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004145 memcpy(dst, src, PAGE_SIZE);
Peter Zijlstra3e4d3af2010-10-26 14:21:51 -07004146 kunmap_atomic(src);
Chris Wilsone5281cc2010-10-28 13:45:36 +01004147
4148 mark_page_accessed(page);
4149 page_cache_release(page);
Dave Airlie71acb5e2008-12-30 20:31:46 +10004150 }
4151
4152 return 0;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004153}
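/*
 * A hypothetical caller pinning a cursor buffer might use:
 *
 *	i915_gem_attach_phys_object(dev, obj, I915_GEM_PHYS_CURSOR_0, 0);
 *
 * (the id and alignment here are purely illustrative).  Once attached,
 * writes to the object can be serviced through i915_gem_phys_pwrite()
 * below instead of going through the shmem pages.
 */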
4154
4155static int
Chris Wilson05394f32010-11-08 19:18:58 +00004156i915_gem_phys_pwrite(struct drm_device *dev,
4157 struct drm_i915_gem_object *obj,
Dave Airlie71acb5e2008-12-30 20:31:46 +10004158 struct drm_i915_gem_pwrite *args,
4159 struct drm_file *file_priv)
4160{
Chris Wilson05394f32010-11-08 19:18:58 +00004161 void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
Chris Wilsonb47b30c2010-11-08 01:12:29 +00004162 char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
Dave Airlie71acb5e2008-12-30 20:31:46 +10004163
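	/*
	 * Try a non-blocking, cache-bypassing copy first while holding the
	 * struct mutex; only if that faults do we fall back to the sleeping
	 * copy_from_user() below, dropping the lock around it.
	 */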
Chris Wilsonb47b30c2010-11-08 01:12:29 +00004164 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4165 unsigned long unwritten;
4166
4167 /* The physical object once assigned is fixed for the lifetime
4168 * of the obj, so we can safely drop the lock and continue
4169 * to access vaddr.
4170 */
4171 mutex_unlock(&dev->struct_mutex);
4172 unwritten = copy_from_user(vaddr, user_data, args->size);
4173 mutex_lock(&dev->struct_mutex);
4174 if (unwritten)
4175 return -EFAULT;
4176 }
Dave Airlie71acb5e2008-12-30 20:31:46 +10004177
Daniel Vetter40ce6572010-11-05 18:12:18 +01004178 intel_gtt_chipset_flush();
Dave Airlie71acb5e2008-12-30 20:31:46 +10004179 return 0;
4180}
Eric Anholtb9624422009-06-03 07:27:35 +00004181
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004182void i915_gem_release(struct drm_device *dev, struct drm_file *file)
Eric Anholtb9624422009-06-03 07:27:35 +00004183{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004184 struct drm_i915_file_private *file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00004185
4186 /* Clean up our request list when the client is going away, so that
4187 * later retire_requests won't dereference our soon-to-be-gone
4188 * file_priv.
4189 */
Chris Wilson1c255952010-09-26 11:03:27 +01004190 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01004191 while (!list_empty(&file_priv->mm.request_list)) {
4192 struct drm_i915_gem_request *request;
4193
4194 request = list_first_entry(&file_priv->mm.request_list,
4195 struct drm_i915_gem_request,
4196 client_list);
4197 list_del(&request->client_list);
4198 request->file_priv = NULL;
4199 }
Chris Wilson1c255952010-09-26 11:03:27 +01004200 spin_unlock(&file_priv->mm.lock);
Eric Anholtb9624422009-06-03 07:27:35 +00004201}
Chris Wilson31169712009-09-14 16:50:28 +01004202
Chris Wilson31169712009-09-14 16:50:28 +01004203static int
Chris Wilson1637ef42010-04-20 17:10:35 +01004204i915_gpu_is_active(struct drm_device *dev)
4205{
4206 drm_i915_private_t *dev_priv = dev->dev_private;
4207 int lists_empty;
4208
Chris Wilson1637ef42010-04-20 17:10:35 +01004209 lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
Chris Wilson17250b72010-10-28 12:51:39 +01004210 list_empty(&dev_priv->mm.active_list);
Chris Wilson1637ef42010-04-20 17:10:35 +01004211
4212 return !lists_empty;
4213}
4214
4215static int
Ying Han1495f232011-05-24 17:12:27 -07004216i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
Chris Wilson31169712009-09-14 16:50:28 +01004217{
Chris Wilson17250b72010-10-28 12:51:39 +01004218 struct drm_i915_private *dev_priv =
4219 container_of(shrinker,
4220 struct drm_i915_private,
4221 mm.inactive_shrinker);
4222 struct drm_device *dev = dev_priv->dev;
4223 struct drm_i915_gem_object *obj, *next;
Ying Han1495f232011-05-24 17:12:27 -07004224 int nr_to_scan = sc->nr_to_scan;
Chris Wilson17250b72010-10-28 12:51:39 +01004225 int cnt;
4226
4227 if (!mutex_trylock(&dev->struct_mutex))
Chris Wilsonbbe2e112010-10-28 22:35:07 +01004228 return 0;
Chris Wilson31169712009-09-14 16:50:28 +01004229
4230 /* "fast-path" to count number of available objects */
4231 if (nr_to_scan == 0) {
Chris Wilson17250b72010-10-28 12:51:39 +01004232 cnt = 0;
4233 list_for_each_entry(obj,
4234 &dev_priv->mm.inactive_list,
4235 mm_list)
4236 cnt++;
4237 mutex_unlock(&dev->struct_mutex);
4238 return cnt / 100 * sysctl_vfs_cache_pressure;
Chris Wilson31169712009-09-14 16:50:28 +01004239 }
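	/*
	 * A non-zero nr_to_scan requests actual reclaim: two passes over
	 * the inactive list (purgeable objects first, then anything that
	 * will unbind), and as a last resort the GPU is idled and the scan
	 * restarted.  The return value, like the fast-path count above, is
	 * scaled by sysctl_vfs_cache_pressure.
	 */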
4240
Chris Wilson1637ef42010-04-20 17:10:35 +01004241rescan:
Chris Wilson31169712009-09-14 16:50:28 +01004242 /* first scan for clean buffers */
Chris Wilson17250b72010-10-28 12:51:39 +01004243 i915_gem_retire_requests(dev);
Chris Wilson31169712009-09-14 16:50:28 +01004244
Chris Wilson17250b72010-10-28 12:51:39 +01004245 list_for_each_entry_safe(obj, next,
4246 &dev_priv->mm.inactive_list,
4247 mm_list) {
4248 if (i915_gem_object_is_purgeable(obj)) {
Chris Wilson20217462010-11-23 15:26:33 +00004249 if (i915_gem_object_unbind(obj) == 0 &&
4250 --nr_to_scan == 0)
Chris Wilson17250b72010-10-28 12:51:39 +01004251 break;
Chris Wilson31169712009-09-14 16:50:28 +01004252 }
Chris Wilson31169712009-09-14 16:50:28 +01004253 }
4254
4255 /* second pass, evict/count anything still on the inactive list */
Chris Wilson17250b72010-10-28 12:51:39 +01004256 cnt = 0;
4257 list_for_each_entry_safe(obj, next,
4258 &dev_priv->mm.inactive_list,
4259 mm_list) {
Chris Wilson20217462010-11-23 15:26:33 +00004260 if (nr_to_scan &&
4261 i915_gem_object_unbind(obj) == 0)
Chris Wilson17250b72010-10-28 12:51:39 +01004262 nr_to_scan--;
Chris Wilson20217462010-11-23 15:26:33 +00004263 else
Chris Wilson17250b72010-10-28 12:51:39 +01004264 cnt++;
Chris Wilson31169712009-09-14 16:50:28 +01004265 }
4266
Chris Wilson17250b72010-10-28 12:51:39 +01004267 if (nr_to_scan && i915_gpu_is_active(dev)) {
Chris Wilson1637ef42010-04-20 17:10:35 +01004268 /*
4269 * We are desperate for pages, so as a last resort, wait
4270 * for the GPU to finish and discard whatever we can.
 4271	 * This dramatically reduces the number of OOM-killer
 4272	 * events while running the GPU aggressively.
4273 */
Ben Widawskyb2da9fe2012-04-26 16:02:58 -07004274 if (i915_gpu_idle(dev) == 0)
Chris Wilson1637ef42010-04-20 17:10:35 +01004275 goto rescan;
4276 }
Chris Wilson17250b72010-10-28 12:51:39 +01004277 mutex_unlock(&dev->struct_mutex);
4278 return cnt / 100 * sysctl_vfs_cache_pressure;
Chris Wilson31169712009-09-14 16:50:28 +01004279}