/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>

static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
                                                    unsigned alignment,
                                                    bool map_and_fenceable);
static int i915_gem_phys_pwrite(struct drm_device *dev,
                                struct drm_i915_gem_object *obj,
                                struct drm_i915_gem_pwrite *args,
                                struct drm_file *file);

static void i915_gem_write_fence(struct drm_device *dev, int reg,
                                 struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
                                         struct drm_i915_fence_reg *fence,
                                         bool enable);

static int i915_gem_inactive_shrink(struct shrinker *shrinker,
                                    struct shrink_control *sc);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
        if (obj->tiling_mode)
                i915_gem_release_mmap(obj);

        /* As we do not have an associated fence register, we will force
         * a tiling change if we ever need to acquire one.
         */
        obj->fence_dirty = false;
        obj->fence_reg = I915_FENCE_REG_NONE;
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
                                  size_t size)
{
        dev_priv->mm.object_count++;
        dev_priv->mm.object_memory += size;
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
                                     size_t size)
{
        dev_priv->mm.object_count--;
        dev_priv->mm.object_memory -= size;
}

static int
i915_gem_wait_for_error(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct completion *x = &dev_priv->error_completion;
        unsigned long flags;
        int ret;

        if (!atomic_read(&dev_priv->mm.wedged))
                return 0;

        ret = wait_for_completion_interruptible(x);
        if (ret)
                return ret;

        if (atomic_read(&dev_priv->mm.wedged)) {
                /* GPU is hung, bump the completion count to account for
                 * the token we just consumed so that we never hit zero and
                 * end up waiting upon a subsequent completion event that
                 * will never happen.
                 */
                spin_lock_irqsave(&x->wait.lock, flags);
                x->done++;
                spin_unlock_irqrestore(&x->wait.lock, flags);
        }
        return 0;
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
        int ret;

        ret = i915_gem_wait_for_error(dev);
        if (ret)
                return ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        WARN_ON(i915_verify_lists(dev));
        return 0;
}

static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
        return !obj->active;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file)
{
        struct drm_i915_gem_init *args = data;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        if (args->gtt_start >= args->gtt_end ||
            (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
                return -EINVAL;

        /* GEM with user mode setting was never supported on ilk and later. */
        if (INTEL_INFO(dev)->gen >= 5)
                return -ENODEV;

        mutex_lock(&dev->struct_mutex);
        i915_gem_init_global_gtt(dev, args->gtt_start,
                                 args->gtt_end, args->gtt_end);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_get_aperture *args = data;
        struct drm_i915_gem_object *obj;
        size_t pinned;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        pinned = 0;
        mutex_lock(&dev->struct_mutex);
        list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
                if (obj->pin_count)
                        pinned += obj->gtt_space->size;
        mutex_unlock(&dev->struct_mutex);

        args->aper_size = dev_priv->mm.gtt_total;
        args->aper_available_size = args->aper_size - pinned;

        return 0;
}

static int
i915_gem_create(struct drm_file *file,
                struct drm_device *dev,
                uint64_t size,
                uint32_t *handle_p)
{
        struct drm_i915_gem_object *obj;
        int ret;
        u32 handle;

        size = roundup(size, PAGE_SIZE);
        if (size == 0)
                return -EINVAL;

        /* Allocate the new object */
        obj = i915_gem_alloc_object(dev, size);
        if (obj == NULL)
                return -ENOMEM;

        ret = drm_gem_handle_create(file, &obj->base, &handle);
        if (ret) {
                drm_gem_object_release(&obj->base);
                i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
                kfree(obj);
                return ret;
        }

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference(&obj->base);
        trace_i915_gem_object_create(obj);

        *handle_p = handle;
        return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
                     struct drm_device *dev,
                     struct drm_mode_create_dumb *args)
{
        /* have to work out size/pitch and return them */
        args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
        args->size = args->pitch * args->height;
        return i915_gem_create(file, dev,
                               args->size, &args->handle);
}

int i915_gem_dumb_destroy(struct drm_file *file,
                          struct drm_device *dev,
                          uint32_t handle)
{
        return drm_gem_handle_delete(file, handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
{
        struct drm_i915_gem_create *args = data;
        return i915_gem_create(file, dev,
                               args->size, &args->handle);
}

static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
        drm_i915_private_t *dev_priv = obj->base.dev->dev_private;

        return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
                obj->tiling_mode != I915_TILING_NONE;
}

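/* Helpers for copying between user memory and a bit-17-swizzled object.
 * The copy proceeds one 64-byte cacheline at a time; XORing the GPU-side
 * offset with 64 swaps the two 64-byte halves of each 128-byte block and
 * so undoes the swizzling applied by the memory controller.  A nonzero
 * return value indicates an incomplete copy.
 */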
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
                        const char *gpu_vaddr, int gpu_offset,
                        int length)
{
        int ret, cpu_offset = 0;

        while (length > 0) {
                int cacheline_end = ALIGN(gpu_offset + 1, 64);
                int this_length = min(cacheline_end - gpu_offset, length);
                int swizzled_gpu_offset = gpu_offset ^ 64;

                ret = __copy_to_user(cpu_vaddr + cpu_offset,
                                     gpu_vaddr + swizzled_gpu_offset,
                                     this_length);
                if (ret)
                        return ret + length;

                cpu_offset += this_length;
                gpu_offset += this_length;
                length -= this_length;
        }

        return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
                          const char __user *cpu_vaddr,
                          int length)
{
        int ret, cpu_offset = 0;

        while (length > 0) {
                int cacheline_end = ALIGN(gpu_offset + 1, 64);
                int this_length = min(cacheline_end - gpu_offset, length);
                int swizzled_gpu_offset = gpu_offset ^ 64;

                ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
                                       cpu_vaddr + cpu_offset,
                                       this_length);
                if (ret)
                        return ret + length;

                cpu_offset += this_length;
                gpu_offset += this_length;
                length -= this_length;
        }

        return 0;
}

/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
                 char __user *user_data,
                 bool page_do_bit17_swizzling, bool needs_clflush)
{
        char *vaddr;
        int ret;

        if (unlikely(page_do_bit17_swizzling))
                return -EINVAL;

        vaddr = kmap_atomic(page);
        if (needs_clflush)
                drm_clflush_virt_range(vaddr + shmem_page_offset,
                                       page_length);
        ret = __copy_to_user_inatomic(user_data,
                                      vaddr + shmem_page_offset,
                                      page_length);
        kunmap_atomic(vaddr);

        return ret;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
                             bool swizzled)
{
        if (unlikely(swizzled)) {
                unsigned long start = (unsigned long) addr;
                unsigned long end = (unsigned long) addr + length;

                /* For swizzling simply ensure that we always flush both
                 * channels. Lame, but simple and it works. Swizzled
                 * pwrite/pread is far from a hotpath - current userspace
                 * doesn't use it at all. */
                start = round_down(start, 128);
                end = round_up(end, 128);

                drm_clflush_virt_range((void *)start, end - start);
        } else {
                drm_clflush_virt_range(addr, length);
        }

}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
                 char __user *user_data,
                 bool page_do_bit17_swizzling, bool needs_clflush)
{
        char *vaddr;
        int ret;

        vaddr = kmap(page);
        if (needs_clflush)
                shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
                                             page_length,
                                             page_do_bit17_swizzling);

        if (page_do_bit17_swizzling)
                ret = __copy_to_user_swizzled(user_data,
                                              vaddr, shmem_page_offset,
                                              page_length);
        else
                ret = __copy_to_user(user_data,
                                     vaddr + shmem_page_offset,
                                     page_length);
        kunmap(page);

        return ret;
}

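/* Copy the requested range out of a shmem-backed object.  Each page is
 * first tried with the atomic fastpath above; if that fails, we drop
 * struct_mutex, prefault the user buffer once, and retry with the
 * non-atomic slowpath before retaking the lock.
 */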
static int
i915_gem_shmem_pread(struct drm_device *dev,
                     struct drm_i915_gem_object *obj,
                     struct drm_i915_gem_pread *args,
                     struct drm_file *file)
{
        struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
        char __user *user_data;
        ssize_t remain;
        loff_t offset;
        int shmem_page_offset, page_length, ret = 0;
        int obj_do_bit17_swizzling, page_do_bit17_swizzling;
        int hit_slowpath = 0;
        int prefaulted = 0;
        int needs_clflush = 0;
        int release_page;

        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;

        obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

        if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
                /* If we're not in the cpu read domain, set ourself into the gtt
                 * read domain and manually flush cachelines (if required). This
                 * optimizes for the case when the gpu will dirty the data
                 * anyway again before the next pread happens. */
                if (obj->cache_level == I915_CACHE_NONE)
                        needs_clflush = 1;
                ret = i915_gem_object_set_to_gtt_domain(obj, false);
                if (ret)
                        return ret;
        }

        offset = args->offset;

        while (remain > 0) {
                struct page *page;

                /* Operation in this page
                 *
                 * shmem_page_offset = offset within page in shmem file
                 * page_length = bytes to copy for this page
                 */
                shmem_page_offset = offset_in_page(offset);
                page_length = remain;
                if ((shmem_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - shmem_page_offset;

                if (obj->pages) {
                        page = obj->pages[offset >> PAGE_SHIFT];
                        release_page = 0;
                } else {
                        page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
                        if (IS_ERR(page)) {
                                ret = PTR_ERR(page);
                                goto out;
                        }
                        release_page = 1;
                }

                page_do_bit17_swizzling = obj_do_bit17_swizzling &&
                        (page_to_phys(page) & (1 << 17)) != 0;

                ret = shmem_pread_fast(page, shmem_page_offset, page_length,
                                       user_data, page_do_bit17_swizzling,
                                       needs_clflush);
                if (ret == 0)
                        goto next_page;

                hit_slowpath = 1;
                page_cache_get(page);
                mutex_unlock(&dev->struct_mutex);

                if (!prefaulted) {
                        ret = fault_in_multipages_writeable(user_data, remain);
                        /* Userspace is tricking us, but we've already clobbered
                         * its pages with the prefault and promised to write the
                         * data up to the first fault. Hence ignore any errors
                         * and just continue. */
                        (void)ret;
                        prefaulted = 1;
                }

                ret = shmem_pread_slow(page, shmem_page_offset, page_length,
                                       user_data, page_do_bit17_swizzling,
                                       needs_clflush);

                mutex_lock(&dev->struct_mutex);
                page_cache_release(page);
next_page:
                mark_page_accessed(page);
                if (release_page)
                        page_cache_release(page);

                if (ret) {
                        ret = -EFAULT;
                        goto out;
                }

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

out:
        if (hit_slowpath) {
                /* Fixup: Kill any reinstated backing storage pages */
                if (obj->madv == __I915_MADV_PURGED)
                        i915_gem_object_truncate(obj);
        }

        return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file)
{
        struct drm_i915_gem_pread *args = data;
        struct drm_i915_gem_object *obj;
        int ret = 0;

        if (args->size == 0)
                return 0;

        if (!access_ok(VERIFY_WRITE,
                       (char __user *)(uintptr_t)args->data_ptr,
                       args->size))
                return -EFAULT;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }

        /* Bounds check source. */
        if (args->offset > obj->base.size ||
            args->size > obj->base.size - args->offset) {
                ret = -EINVAL;
                goto out;
        }

        trace_i915_gem_object_pread(obj, args->offset, args->size);

        ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
        drm_gem_object_unreference(&obj->base);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

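/* The copy goes through an atomic write-combining mapping of the GTT
 * aperture; a nonzero return means some bytes could not be written, in
 * which case the caller bails out and the ioctl falls back to the shmem
 * path.
 */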
static inline int
fast_user_write(struct io_mapping *mapping,
                loff_t page_base, int page_offset,
                char __user *user_data,
                int length)
{
        void __iomem *vaddr_atomic;
        void *vaddr;
        unsigned long unwritten;

        vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
        /* We can use the cpu mem copy function because this is X86. */
        vaddr = (void __force*)vaddr_atomic + page_offset;
        unwritten = __copy_from_user_inatomic_nocache(vaddr,
                                                      user_data, length);
        io_mapping_unmap_atomic(vaddr_atomic);
        return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
                         struct drm_i915_gem_object *obj,
                         struct drm_i915_gem_pwrite *args,
                         struct drm_file *file)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        ssize_t remain;
        loff_t offset, page_base;
        char __user *user_data;
        int page_offset, page_length, ret;

        ret = i915_gem_object_pin(obj, 0, true);
        if (ret)
                goto out;

        ret = i915_gem_object_set_to_gtt_domain(obj, true);
        if (ret)
                goto out_unpin;

        ret = i915_gem_object_put_fence(obj);
        if (ret)
                goto out_unpin;

        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;

        offset = obj->gtt_offset + args->offset;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                page_base = offset & PAGE_MASK;
                page_offset = offset_in_page(offset);
                page_length = remain;
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;

                /* If we get a fault while copying data, then (presumably) our
                 * source page isn't available. Return the error and we'll
                 * retry in the slow path.
                 */
                if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
                                    page_offset, user_data, page_length)) {
                        ret = -EFAULT;
                        goto out_unpin;
                }

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

out_unpin:
        i915_gem_object_unpin(obj);
out:
        return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
                  char __user *user_data,
                  bool page_do_bit17_swizzling,
                  bool needs_clflush_before,
                  bool needs_clflush_after)
{
        char *vaddr;
        int ret;

        if (unlikely(page_do_bit17_swizzling))
                return -EINVAL;

        vaddr = kmap_atomic(page);
        if (needs_clflush_before)
                drm_clflush_virt_range(vaddr + shmem_page_offset,
                                       page_length);
        ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
                                                user_data,
                                                page_length);
        if (needs_clflush_after)
                drm_clflush_virt_range(vaddr + shmem_page_offset,
                                       page_length);
        kunmap_atomic(vaddr);

        return ret;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
                  char __user *user_data,
                  bool page_do_bit17_swizzling,
                  bool needs_clflush_before,
                  bool needs_clflush_after)
{
        char *vaddr;
        int ret;

        vaddr = kmap(page);
        if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
                shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
                                             page_length,
                                             page_do_bit17_swizzling);
        if (page_do_bit17_swizzling)
                ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
                                                user_data,
                                                page_length);
        else
                ret = __copy_from_user(vaddr + shmem_page_offset,
                                       user_data,
                                       page_length);
        if (needs_clflush_after)
                shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
                                             page_length,
                                             page_do_bit17_swizzling);
        kunmap(page);

        return ret;
}

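/* Write user data into a shmem-backed object.  This mirrors the pread path
 * above: each page is first tried with the atomic fastpath; on failure we
 * drop struct_mutex and fall back to the non-atomic slowpath.  Cachelines
 * are flushed before partially overwritten lines and after the copy when
 * the object is not in the CPU write domain.
 */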
Eric Anholt40123c12009-03-09 13:42:30 -0700710static int
Daniel Vettere244a442012-03-25 19:47:28 +0200711i915_gem_shmem_pwrite(struct drm_device *dev,
712 struct drm_i915_gem_object *obj,
713 struct drm_i915_gem_pwrite *args,
714 struct drm_file *file)
Eric Anholt40123c12009-03-09 13:42:30 -0700715{
Chris Wilson05394f32010-11-08 19:18:58 +0000716 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
Eric Anholt40123c12009-03-09 13:42:30 -0700717 ssize_t remain;
Daniel Vetter8c599672011-12-14 13:57:31 +0100718 loff_t offset;
719 char __user *user_data;
Ben Widawskyeb2c0c82012-02-15 14:42:43 +0100720 int shmem_page_offset, page_length, ret = 0;
Daniel Vetter8c599672011-12-14 13:57:31 +0100721 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
Daniel Vettere244a442012-03-25 19:47:28 +0200722 int hit_slowpath = 0;
Daniel Vetter58642882012-03-25 19:47:37 +0200723 int needs_clflush_after = 0;
724 int needs_clflush_before = 0;
Daniel Vetter692a5762012-03-25 19:47:34 +0200725 int release_page;
Eric Anholt40123c12009-03-09 13:42:30 -0700726
Daniel Vetter8c599672011-12-14 13:57:31 +0100727 user_data = (char __user *) (uintptr_t) args->data_ptr;
Eric Anholt40123c12009-03-09 13:42:30 -0700728 remain = args->size;
729
Daniel Vetter8c599672011-12-14 13:57:31 +0100730 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
Eric Anholt40123c12009-03-09 13:42:30 -0700731
Daniel Vetter58642882012-03-25 19:47:37 +0200732 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
733 /* If we're not in the cpu write domain, set ourself into the gtt
734 * write domain and manually flush cachelines (if required). This
735 * optimizes for the case when the gpu will use the data
736 * right away and we therefore have to clflush anyway. */
737 if (obj->cache_level == I915_CACHE_NONE)
738 needs_clflush_after = 1;
739 ret = i915_gem_object_set_to_gtt_domain(obj, true);
740 if (ret)
741 return ret;
742 }
743 /* Same trick applies for invalidate partially written cachelines before
744 * writing. */
745 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
746 && obj->cache_level == I915_CACHE_NONE)
747 needs_clflush_before = 1;
748
Eric Anholt40123c12009-03-09 13:42:30 -0700749 offset = args->offset;
Chris Wilson05394f32010-11-08 19:18:58 +0000750 obj->dirty = 1;
Eric Anholt40123c12009-03-09 13:42:30 -0700751
752 while (remain > 0) {
Chris Wilsone5281cc2010-10-28 13:45:36 +0100753 struct page *page;
Daniel Vetter58642882012-03-25 19:47:37 +0200754 int partial_cacheline_write;
Chris Wilsone5281cc2010-10-28 13:45:36 +0100755
Eric Anholt40123c12009-03-09 13:42:30 -0700756 /* Operation in this page
757 *
Eric Anholt40123c12009-03-09 13:42:30 -0700758 * shmem_page_offset = offset within page in shmem file
Eric Anholt40123c12009-03-09 13:42:30 -0700759 * page_length = bytes to copy for this page
760 */
Chris Wilsonc8cbbb82011-05-12 22:17:11 +0100761 shmem_page_offset = offset_in_page(offset);
Eric Anholt40123c12009-03-09 13:42:30 -0700762
763 page_length = remain;
764 if ((shmem_page_offset + page_length) > PAGE_SIZE)
765 page_length = PAGE_SIZE - shmem_page_offset;
Eric Anholt40123c12009-03-09 13:42:30 -0700766
Daniel Vetter58642882012-03-25 19:47:37 +0200767 /* If we don't overwrite a cacheline completely we need to be
768 * careful to have up-to-date data by first clflushing. Don't
769 * overcomplicate things and flush the entire patch. */
770 partial_cacheline_write = needs_clflush_before &&
771 ((shmem_page_offset | page_length)
772 & (boot_cpu_data.x86_clflush_size - 1));
773
Daniel Vetter692a5762012-03-25 19:47:34 +0200774 if (obj->pages) {
775 page = obj->pages[offset >> PAGE_SHIFT];
776 release_page = 0;
777 } else {
778 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
779 if (IS_ERR(page)) {
780 ret = PTR_ERR(page);
781 goto out;
782 }
783 release_page = 1;
Chris Wilsone5281cc2010-10-28 13:45:36 +0100784 }
785
Daniel Vetter8c599672011-12-14 13:57:31 +0100786 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
787 (page_to_phys(page) & (1 << 17)) != 0;
788
Daniel Vetterd174bd62012-03-25 19:47:40 +0200789 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
790 user_data, page_do_bit17_swizzling,
791 partial_cacheline_write,
792 needs_clflush_after);
793 if (ret == 0)
794 goto next_page;
Eric Anholt40123c12009-03-09 13:42:30 -0700795
Daniel Vettere244a442012-03-25 19:47:28 +0200796 hit_slowpath = 1;
Daniel Vetter692a5762012-03-25 19:47:34 +0200797 page_cache_get(page);
Daniel Vettere244a442012-03-25 19:47:28 +0200798 mutex_unlock(&dev->struct_mutex);
799
Daniel Vetterd174bd62012-03-25 19:47:40 +0200800 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
801 user_data, page_do_bit17_swizzling,
802 partial_cacheline_write,
803 needs_clflush_after);
Eric Anholt40123c12009-03-09 13:42:30 -0700804
Daniel Vettere244a442012-03-25 19:47:28 +0200805 mutex_lock(&dev->struct_mutex);
Daniel Vetter692a5762012-03-25 19:47:34 +0200806 page_cache_release(page);
Daniel Vettere244a442012-03-25 19:47:28 +0200807next_page:
Chris Wilsone5281cc2010-10-28 13:45:36 +0100808 set_page_dirty(page);
809 mark_page_accessed(page);
Daniel Vetter692a5762012-03-25 19:47:34 +0200810 if (release_page)
811 page_cache_release(page);
Chris Wilsone5281cc2010-10-28 13:45:36 +0100812
Daniel Vetter8c599672011-12-14 13:57:31 +0100813 if (ret) {
814 ret = -EFAULT;
815 goto out;
816 }
817
Eric Anholt40123c12009-03-09 13:42:30 -0700818 remain -= page_length;
Daniel Vetter8c599672011-12-14 13:57:31 +0100819 user_data += page_length;
Eric Anholt40123c12009-03-09 13:42:30 -0700820 offset += page_length;
821 }
822
Chris Wilsonfbd5a262010-10-14 15:03:58 +0100823out:
Daniel Vettere244a442012-03-25 19:47:28 +0200824 if (hit_slowpath) {
825 /* Fixup: Kill any reinstated backing storage pages */
826 if (obj->madv == __I915_MADV_PURGED)
827 i915_gem_object_truncate(obj);
828 /* and flush dirty cachelines in case the object isn't in the cpu write
829 * domain anymore. */
830 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
831 i915_gem_clflush_object(obj);
832 intel_gtt_chipset_flush();
833 }
Daniel Vetter8c599672011-12-14 13:57:31 +0100834 }
Eric Anholt40123c12009-03-09 13:42:30 -0700835
Daniel Vetter58642882012-03-25 19:47:37 +0200836 if (needs_clflush_after)
837 intel_gtt_chipset_flush();
838
Eric Anholt40123c12009-03-09 13:42:30 -0700839 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -0700840}
841
842/**
843 * Writes data to the object referenced by handle.
844 *
845 * On error, the contents of the buffer that were to be modified are undefined.
846 */
847int
848i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
Chris Wilsonfbd5a262010-10-14 15:03:58 +0100849 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -0700850{
851 struct drm_i915_gem_pwrite *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +0000852 struct drm_i915_gem_object *obj;
Chris Wilson51311d02010-11-17 09:10:42 +0000853 int ret;
854
855 if (args->size == 0)
856 return 0;
857
858 if (!access_ok(VERIFY_READ,
859 (char __user *)(uintptr_t)args->data_ptr,
860 args->size))
861 return -EFAULT;
862
Daniel Vetterf56f8212012-03-25 19:47:41 +0200863 ret = fault_in_multipages_readable((char __user *)(uintptr_t)args->data_ptr,
864 args->size);
Chris Wilson51311d02010-11-17 09:10:42 +0000865 if (ret)
866 return -EFAULT;
Eric Anholt673a3942008-07-30 12:06:12 -0700867
Chris Wilson1d7cfea2010-10-17 09:45:41 +0100868 ret = i915_mutex_lock_interruptible(dev);
869 if (ret)
870 return ret;
871
Chris Wilson05394f32010-11-08 19:18:58 +0000872 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +0000873 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +0100874 ret = -ENOENT;
875 goto unlock;
876 }
Eric Anholt673a3942008-07-30 12:06:12 -0700877
Chris Wilson7dcd2492010-09-26 20:21:44 +0100878 /* Bounds check destination. */
Chris Wilson05394f32010-11-08 19:18:58 +0000879 if (args->offset > obj->base.size ||
880 args->size > obj->base.size - args->offset) {
Chris Wilsonce9d4192010-09-26 20:50:05 +0100881 ret = -EINVAL;
Chris Wilson35b62a82010-09-26 20:23:38 +0100882 goto out;
Chris Wilsonce9d4192010-09-26 20:50:05 +0100883 }
884
Chris Wilsondb53a302011-02-03 11:57:46 +0000885 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
886
Daniel Vetter935aaa62012-03-25 19:47:35 +0200887 ret = -EFAULT;
Eric Anholt673a3942008-07-30 12:06:12 -0700888 /* We can only do the GTT pwrite on untiled buffers, as otherwise
889 * it would end up going through the fenced access, and we'll get
890 * different detiling behavior between reading and writing.
891 * pread/pwrite currently are reading and writing from the CPU
892 * perspective, requiring manual detiling by the client.
893 */
Daniel Vetter5c0480f2011-12-14 13:57:30 +0100894 if (obj->phys_obj) {
Chris Wilsonfbd5a262010-10-14 15:03:58 +0100895 ret = i915_gem_phys_pwrite(dev, obj, args, file);
Daniel Vetter5c0480f2011-12-14 13:57:30 +0100896 goto out;
897 }
898
899 if (obj->gtt_space &&
Daniel Vetter3ae53782012-03-25 19:47:33 +0200900 obj->cache_level == I915_CACHE_NONE &&
Daniel Vetterc07496f2012-04-13 15:51:51 +0200901 obj->tiling_mode == I915_TILING_NONE &&
Daniel Vetterffc62972012-03-25 19:47:38 +0200902 obj->map_and_fenceable &&
Daniel Vetter5c0480f2011-12-14 13:57:30 +0100903 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
Chris Wilsonfbd5a262010-10-14 15:03:58 +0100904 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
Daniel Vetter935aaa62012-03-25 19:47:35 +0200905 /* Note that the gtt paths might fail with non-page-backed user
906 * pointers (e.g. gtt mappings when moving data between
907 * textures). Fallback to the shmem path in that case. */
Eric Anholt40123c12009-03-09 13:42:30 -0700908 }
Eric Anholt673a3942008-07-30 12:06:12 -0700909
Daniel Vetter5c0480f2011-12-14 13:57:30 +0100910 if (ret == -EFAULT)
Daniel Vetter935aaa62012-03-25 19:47:35 +0200911 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
Daniel Vetter5c0480f2011-12-14 13:57:30 +0100912
Chris Wilson35b62a82010-09-26 20:23:38 +0100913out:
Chris Wilson05394f32010-11-08 19:18:58 +0000914 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +0100915unlock:
Chris Wilsonfbd5a262010-10-14 15:03:58 +0100916 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -0700917 return ret;
918}
919
920/**
Eric Anholt2ef7eea2008-11-10 10:53:25 -0800921 * Called when user space prepares to use an object with the CPU, either
922 * through the mmap ioctl's mapping or a GTT mapping.
Eric Anholt673a3942008-07-30 12:06:12 -0700923 */
924int
925i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +0000926 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -0700927{
928 struct drm_i915_gem_set_domain *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +0000929 struct drm_i915_gem_object *obj;
Eric Anholt2ef7eea2008-11-10 10:53:25 -0800930 uint32_t read_domains = args->read_domains;
931 uint32_t write_domain = args->write_domain;
Eric Anholt673a3942008-07-30 12:06:12 -0700932 int ret;
933
934 if (!(dev->driver->driver_features & DRIVER_GEM))
935 return -ENODEV;
936
Eric Anholt2ef7eea2008-11-10 10:53:25 -0800937 /* Only handle setting domains to types used by the CPU. */
Chris Wilson21d509e2009-06-06 09:46:02 +0100938 if (write_domain & I915_GEM_GPU_DOMAINS)
Eric Anholt2ef7eea2008-11-10 10:53:25 -0800939 return -EINVAL;
940
Chris Wilson21d509e2009-06-06 09:46:02 +0100941 if (read_domains & I915_GEM_GPU_DOMAINS)
Eric Anholt2ef7eea2008-11-10 10:53:25 -0800942 return -EINVAL;
943
944 /* Having something in the write domain implies it's in the read
945 * domain, and only that read domain. Enforce that in the request.
946 */
947 if (write_domain != 0 && read_domains != write_domain)
948 return -EINVAL;
949
Chris Wilson76c1dec2010-09-25 11:22:51 +0100950 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +0100951 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +0100952 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -0700953
Chris Wilson05394f32010-11-08 19:18:58 +0000954 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +0000955 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +0100956 ret = -ENOENT;
957 goto unlock;
Chris Wilson76c1dec2010-09-25 11:22:51 +0100958 }
Jesse Barnes652c3932009-08-17 13:31:43 -0700959
Eric Anholt2ef7eea2008-11-10 10:53:25 -0800960 if (read_domains & I915_GEM_DOMAIN_GTT) {
961 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
Eric Anholt02354392008-11-26 13:58:13 -0800962
963 /* Silently promote "you're not bound, there was nothing to do"
964 * to success, since the client was just asking us to
965 * make sure everything was done.
966 */
967 if (ret == -EINVAL)
968 ret = 0;
Eric Anholt2ef7eea2008-11-10 10:53:25 -0800969 } else {
Eric Anholte47c68e2008-11-14 13:35:19 -0800970 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
Eric Anholt2ef7eea2008-11-10 10:53:25 -0800971 }
972
Chris Wilson05394f32010-11-08 19:18:58 +0000973 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +0100974unlock:
Eric Anholt673a3942008-07-30 12:06:12 -0700975 mutex_unlock(&dev->struct_mutex);
976 return ret;
977}
978
979/**
980 * Called when user space has done writes to this buffer
981 */
982int
983i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +0000984 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -0700985{
986 struct drm_i915_gem_sw_finish *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +0000987 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -0700988 int ret = 0;
989
990 if (!(dev->driver->driver_features & DRIVER_GEM))
991 return -ENODEV;
992
Chris Wilson76c1dec2010-09-25 11:22:51 +0100993 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +0100994 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +0100995 return ret;
Chris Wilson1d7cfea2010-10-17 09:45:41 +0100996
Chris Wilson05394f32010-11-08 19:18:58 +0000997 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +0000998 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +0100999 ret = -ENOENT;
1000 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07001001 }
1002
Eric Anholt673a3942008-07-30 12:06:12 -07001003 /* Pinned buffers may be scanout, so flush the cache */
Chris Wilson05394f32010-11-08 19:18:58 +00001004 if (obj->pin_count)
Eric Anholte47c68e2008-11-14 13:35:19 -08001005 i915_gem_object_flush_cpu_write_domain(obj);
1006
Chris Wilson05394f32010-11-08 19:18:58 +00001007 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001008unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07001009 mutex_unlock(&dev->struct_mutex);
1010 return ret;
1011}
1012
1013/**
1014 * Maps the contents of an object, returning the address it is mapped
1015 * into.
1016 *
1017 * While the mapping holds a reference on the contents of the object, it doesn't
1018 * imply a ref on the object itself.
1019 */
1020int
1021i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00001022 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07001023{
1024 struct drm_i915_gem_mmap *args = data;
1025 struct drm_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001026 unsigned long addr;
1027
1028 if (!(dev->driver->driver_features & DRIVER_GEM))
1029 return -ENODEV;
1030
Chris Wilson05394f32010-11-08 19:18:58 +00001031 obj = drm_gem_object_lookup(dev, file, args->handle);
Eric Anholt673a3942008-07-30 12:06:12 -07001032 if (obj == NULL)
Chris Wilsonbf79cb92010-08-04 14:19:46 +01001033 return -ENOENT;
Eric Anholt673a3942008-07-30 12:06:12 -07001034
Eric Anholt673a3942008-07-30 12:06:12 -07001035 down_write(&current->mm->mmap_sem);
1036 addr = do_mmap(obj->filp, 0, args->size,
1037 PROT_READ | PROT_WRITE, MAP_SHARED,
1038 args->offset);
1039 up_write(&current->mm->mmap_sem);
Luca Barbieribc9025b2010-02-09 05:49:12 +00001040 drm_gem_object_unreference_unlocked(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001041 if (IS_ERR((void *)addr))
1042 return addr;
1043
1044 args->addr_ptr = (uint64_t) addr;
1045
1046 return 0;
1047}
1048
Jesse Barnesde151cf2008-11-12 10:03:55 -08001049/**
1050 * i915_gem_fault - fault a page into the GTT
1051 * vma: VMA in question
1052 * vmf: fault info
1053 *
1054 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
1055 * from userspace. The fault handler takes care of binding the object to
1056 * the GTT (if needed), allocating and programming a fence register (again,
1057 * only if needed based on whether the old reg is still valid or the object
1058 * is tiled) and inserting a new PTE into the faulting process.
1059 *
1060 * Note that the faulting process may involve evicting existing objects
1061 * from the GTT and/or fence registers to make room. So performance may
1062 * suffer if the GTT working set is large or there are few fence registers
1063 * left.
1064 */
1065int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1066{
Chris Wilson05394f32010-11-08 19:18:58 +00001067 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1068 struct drm_device *dev = obj->base.dev;
Chris Wilson7d1c4802010-08-07 21:45:03 +01001069 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001070 pgoff_t page_offset;
1071 unsigned long pfn;
1072 int ret = 0;
Jesse Barnes0f973f22009-01-26 17:10:45 -08001073 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
Jesse Barnesde151cf2008-11-12 10:03:55 -08001074
1075 /* We don't use vmf->pgoff since that has the fake offset */
1076 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1077 PAGE_SHIFT;
1078
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001079 ret = i915_mutex_lock_interruptible(dev);
1080 if (ret)
1081 goto out;
Chris Wilsona00b10c2010-09-24 21:15:47 +01001082
Chris Wilsondb53a302011-02-03 11:57:46 +00001083 trace_i915_gem_object_fault(obj, page_offset, true, write);
1084
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001085 /* Now bind it into the GTT if needed */
Chris Wilson919926a2010-11-12 13:42:53 +00001086 if (!obj->map_and_fenceable) {
1087 ret = i915_gem_object_unbind(obj);
1088 if (ret)
1089 goto unlock;
Chris Wilsona00b10c2010-09-24 21:15:47 +01001090 }
Chris Wilson05394f32010-11-08 19:18:58 +00001091 if (!obj->gtt_space) {
Daniel Vetter75e9e912010-11-04 17:11:09 +01001092 ret = i915_gem_object_bind_to_gtt(obj, 0, true);
Chris Wilsonc7150892009-09-23 00:43:56 +01001093 if (ret)
1094 goto unlock;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001095
Eric Anholte92d03b2011-06-14 16:43:09 -07001096 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1097 if (ret)
1098 goto unlock;
1099 }
Chris Wilson4a684a42010-10-28 14:44:08 +01001100
Daniel Vetter74898d72012-02-15 23:50:22 +01001101 if (!obj->has_global_gtt_mapping)
1102 i915_gem_gtt_bind_object(obj, obj->cache_level);
1103
Chris Wilson06d98132012-04-17 15:31:24 +01001104 ret = i915_gem_object_get_fence(obj);
Chris Wilsond9e86c02010-11-10 16:40:20 +00001105 if (ret)
1106 goto unlock;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001107
Chris Wilson05394f32010-11-08 19:18:58 +00001108 if (i915_gem_object_is_inactive(obj))
1109 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
Chris Wilson7d1c4802010-08-07 21:45:03 +01001110
Chris Wilson6299f992010-11-24 12:23:44 +00001111 obj->fault_mappable = true;
1112
Chris Wilson05394f32010-11-08 19:18:58 +00001113 pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
Jesse Barnesde151cf2008-11-12 10:03:55 -08001114 page_offset;
1115
1116 /* Finally, remap it using the new GTT offset */
1117 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
Chris Wilsonc7150892009-09-23 00:43:56 +01001118unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001119 mutex_unlock(&dev->struct_mutex);
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001120out:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001121 switch (ret) {
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001122 case -EIO:
Chris Wilson045e7692010-11-07 09:18:22 +00001123 case -EAGAIN:
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001124 /* Give the error handler a chance to run and move the
1125 * objects off the GPU active list. Next time we service the
1126 * fault, we should be able to transition the page into the
1127 * GTT without touching the GPU (and so avoid further
1128 * EIO/EGAIN). If the GPU is wedged, then there is no issue
1129 * with coherency, just lost writes.
1130 */
Chris Wilson045e7692010-11-07 09:18:22 +00001131 set_need_resched();
Chris Wilsonc7150892009-09-23 00:43:56 +01001132 case 0:
1133 case -ERESTARTSYS:
Chris Wilsonbed636a2011-02-11 20:31:19 +00001134 case -EINTR:
Chris Wilsonc7150892009-09-23 00:43:56 +01001135 return VM_FAULT_NOPAGE;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001136 case -ENOMEM:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001137 return VM_FAULT_OOM;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001138 default:
Chris Wilsonc7150892009-09-23 00:43:56 +01001139 return VM_FAULT_SIGBUS;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001140 }
1141}
1142
1143/**
Chris Wilson901782b2009-07-10 08:18:50 +01001144 * i915_gem_release_mmap - remove physical page mappings
1145 * @obj: obj in question
1146 *
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02001147 * Preserve the reservation of the mmapping with the DRM core code, but
Chris Wilson901782b2009-07-10 08:18:50 +01001148 * relinquish ownership of the pages back to the system.
1149 *
1150 * It is vital that we remove the page mapping if we have mapped a tiled
1151 * object through the GTT and then lose the fence register due to
1152 * resource pressure. Similarly if the object has been moved out of the
1153 * aperture, than pages mapped into userspace must be revoked. Removing the
1154 * mapping will then trigger a page fault on the next user access, allowing
1155 * fixup by i915_gem_fault().
1156 */
Eric Anholtd05ca302009-07-10 13:02:26 -07001157void
Chris Wilson05394f32010-11-08 19:18:58 +00001158i915_gem_release_mmap(struct drm_i915_gem_object *obj)
Chris Wilson901782b2009-07-10 08:18:50 +01001159{
Chris Wilson6299f992010-11-24 12:23:44 +00001160 if (!obj->fault_mappable)
1161 return;
Chris Wilson901782b2009-07-10 08:18:50 +01001162
Chris Wilsonf6e47882011-03-20 21:09:12 +00001163 if (obj->base.dev->dev_mapping)
1164 unmap_mapping_range(obj->base.dev->dev_mapping,
1165 (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
1166 obj->base.size, 1);
Daniel Vetterfb7d5162010-10-01 22:05:20 +02001167
Chris Wilson6299f992010-11-24 12:23:44 +00001168 obj->fault_mappable = false;
Chris Wilson901782b2009-07-10 08:18:50 +01001169}
1170
Chris Wilson92b88ae2010-11-09 11:47:32 +00001171static uint32_t
Chris Wilsone28f8712011-07-18 13:11:49 -07001172i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
Chris Wilson92b88ae2010-11-09 11:47:32 +00001173{
Chris Wilsone28f8712011-07-18 13:11:49 -07001174 uint32_t gtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001175
1176 if (INTEL_INFO(dev)->gen >= 4 ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001177 tiling_mode == I915_TILING_NONE)
1178 return size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001179
1180 /* Previous chips need a power-of-two fence region when tiling */
1181 if (INTEL_INFO(dev)->gen == 3)
Chris Wilsone28f8712011-07-18 13:11:49 -07001182 gtt_size = 1024*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001183 else
Chris Wilsone28f8712011-07-18 13:11:49 -07001184 gtt_size = 512*1024;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001185
Chris Wilsone28f8712011-07-18 13:11:49 -07001186 while (gtt_size < size)
1187 gtt_size <<= 1;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001188
Chris Wilsone28f8712011-07-18 13:11:49 -07001189 return gtt_size;
Chris Wilson92b88ae2010-11-09 11:47:32 +00001190}
1191
Jesse Barnesde151cf2008-11-12 10:03:55 -08001192/**
1193 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1194 * @obj: object to check
1195 *
1196 * Return the required GTT alignment for an object, taking into account
Daniel Vetter5e783302010-11-14 22:32:36 +01001197 * potential fence register mapping.
Jesse Barnesde151cf2008-11-12 10:03:55 -08001198 */
1199static uint32_t
Chris Wilsone28f8712011-07-18 13:11:49 -07001200i915_gem_get_gtt_alignment(struct drm_device *dev,
1201 uint32_t size,
1202 int tiling_mode)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001203{
Jesse Barnesde151cf2008-11-12 10:03:55 -08001204 /*
1205 * Minimum alignment is 4k (GTT page size), but might be greater
1206 * if a fence register is needed for the object.
1207 */
Chris Wilsona00b10c2010-09-24 21:15:47 +01001208 if (INTEL_INFO(dev)->gen >= 4 ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001209 tiling_mode == I915_TILING_NONE)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001210 return 4096;
1211
1212 /*
1213 * Previous chips need to be aligned to the size of the smallest
1214 * fence register that can contain the object.
1215 */
Chris Wilsone28f8712011-07-18 13:11:49 -07001216 return i915_gem_get_gtt_size(dev, size, tiling_mode);
Chris Wilsona00b10c2010-09-24 21:15:47 +01001217}
1218
Daniel Vetter5e783302010-11-14 22:32:36 +01001219/**
1220 * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
1221 * unfenced object
Chris Wilsone28f8712011-07-18 13:11:49 -07001222 * @dev: the device
1223 * @size: size of the object
1224 * @tiling_mode: tiling mode of the object
Daniel Vetter5e783302010-11-14 22:32:36 +01001225 *
1226 * Return the required GTT alignment for an object, only taking into account
1227 * unfenced tiled surface requirements.
1228 */
Chris Wilson467cffb2011-03-07 10:42:03 +00001229uint32_t
Chris Wilsone28f8712011-07-18 13:11:49 -07001230i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
1231 uint32_t size,
1232 int tiling_mode)
Daniel Vetter5e783302010-11-14 22:32:36 +01001233{
Daniel Vetter5e783302010-11-14 22:32:36 +01001234 /*
1235 * Minimum alignment is 4k (GTT page size) for sane hw.
1236 */
1237 if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
Chris Wilsone28f8712011-07-18 13:11:49 -07001238 tiling_mode == I915_TILING_NONE)
Daniel Vetter5e783302010-11-14 22:32:36 +01001239 return 4096;
1240
Chris Wilsone28f8712011-07-18 13:11:49 -07001241	/* Previous hardware, however, needs to be aligned to a power-of-two
1242 * tile height. The simplest way to satisfy this is to reuse the
1243 * power-of-two fence-region size computed by i915_gem_get_gtt_size().
Daniel Vetter5e783302010-11-14 22:32:36 +01001244 */
Chris Wilsone28f8712011-07-18 13:11:49 -07001245 return i915_gem_get_gtt_size(dev, size, tiling_mode);
Daniel Vetter5e783302010-11-14 22:32:36 +01001246}
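/* Compare with i915_gem_get_gtt_alignment() above: both fall back to the
 * power-of-two fence-region size for pre-gen4 tiled objects, but the
 * unfenced variant additionally counts G33-class chipsets as "sane hw"
 * and lets them use the plain 4k minimum.
 */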
1247
Jesse Barnesde151cf2008-11-12 10:03:55 -08001248int
Dave Airlieff72145b2011-02-07 12:16:14 +10001249i915_gem_mmap_gtt(struct drm_file *file,
1250 struct drm_device *dev,
1251 uint32_t handle,
1252 uint64_t *offset)
Jesse Barnesde151cf2008-11-12 10:03:55 -08001253{
Chris Wilsonda761a62010-10-27 17:37:08 +01001254 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00001255 struct drm_i915_gem_object *obj;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001256 int ret;
1257
1258 if (!(dev->driver->driver_features & DRIVER_GEM))
1259 return -ENODEV;
1260
Chris Wilson76c1dec2010-09-25 11:22:51 +01001261 ret = i915_mutex_lock_interruptible(dev);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001262 if (ret)
Chris Wilson76c1dec2010-09-25 11:22:51 +01001263 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001264
Dave Airlieff72145b2011-02-07 12:16:14 +10001265 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00001266 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001267 ret = -ENOENT;
1268 goto unlock;
1269 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08001270
Chris Wilson05394f32010-11-08 19:18:58 +00001271 if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
Chris Wilsonda761a62010-10-27 17:37:08 +01001272 ret = -E2BIG;
Eric Anholtff56b0b2011-10-31 23:16:21 -07001273 goto out;
Chris Wilsonda761a62010-10-27 17:37:08 +01001274 }
1275
Chris Wilson05394f32010-11-08 19:18:58 +00001276 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonab182822009-09-22 18:46:17 +01001277 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001278 ret = -EINVAL;
1279 goto out;
Chris Wilsonab182822009-09-22 18:46:17 +01001280 }
1281
Chris Wilson05394f32010-11-08 19:18:58 +00001282 if (!obj->base.map_list.map) {
Rob Clarkb464e9a2011-08-10 08:09:08 -05001283 ret = drm_gem_create_mmap_offset(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001284 if (ret)
1285 goto out;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001286 }
1287
Dave Airlieff72145b2011-02-07 12:16:14 +10001288 *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001289
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001290out:
Chris Wilson05394f32010-11-08 19:18:58 +00001291 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001292unlock:
Jesse Barnesde151cf2008-11-12 10:03:55 -08001293 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01001294 return ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08001295}
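/* Summary of the checks above: the handle must resolve to a real object,
 * the object must fit within the mappable part of the GTT aperture, it
 * must not have been marked purgeable, and the fake mmap offset is
 * created lazily the first time anyone asks for it.
 */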
1296
Dave Airlieff72145b2011-02-07 12:16:14 +10001297/**
1298 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1299 * @dev: DRM device
1300 * @data: GTT mapping ioctl data
1301 * @file: DRM file pointer for the calling process
1302 *
1303 * Simply returns the fake offset to userspace so it can mmap it.
1304 * The mmap call will end up in drm_gem_mmap(), which will set things
1305 * up so we can get faults in the handler above.
1306 *
1307 * The fault handler will take care of binding the object into the GTT
1308 * (since it may have been evicted to make room for something), allocating
1309 * a fence register, and mapping the appropriate aperture address into
1310 * userspace.
1311 */
1312int
1313i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1314 struct drm_file *file)
1315{
1316 struct drm_i915_gem_mmap_gtt *args = data;
1317
1318 if (!(dev->driver->driver_features & DRIVER_GEM))
1319 return -ENODEV;
1320
1321 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1322}
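/* Rough userspace sketch (assumes libdrm's drmIoctl() and the i915_drm.h
 * definitions; not part of this file): fetch the fake offset, then mmap
 * the DRM fd at that offset to get a GTT view of the object.
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	void *ptr = MAP_FAILED;
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg) == 0)
 *		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, arg.offset);
 */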
1323
1324
Chris Wilsone5281cc2010-10-28 13:45:36 +01001325static int
Chris Wilson05394f32010-11-08 19:18:58 +00001326i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
Chris Wilsone5281cc2010-10-28 13:45:36 +01001327 gfp_t gfpmask)
1328{
Chris Wilsone5281cc2010-10-28 13:45:36 +01001329 int page_count, i;
1330 struct address_space *mapping;
1331 struct inode *inode;
1332 struct page *page;
1333
1334 /* Get the list of pages out of our struct file. They'll be pinned
1335 * at this point until we release them.
1336 */
Chris Wilson05394f32010-11-08 19:18:58 +00001337 page_count = obj->base.size / PAGE_SIZE;
1338 BUG_ON(obj->pages != NULL);
1339 obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
1340 if (obj->pages == NULL)
Chris Wilsone5281cc2010-10-28 13:45:36 +01001341 return -ENOMEM;
1342
Chris Wilson05394f32010-11-08 19:18:58 +00001343 inode = obj->base.filp->f_path.dentry->d_inode;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001344 mapping = inode->i_mapping;
Hugh Dickins5949eac2011-06-27 16:18:18 -07001345 gfpmask |= mapping_gfp_mask(mapping);
1346
Chris Wilsone5281cc2010-10-28 13:45:36 +01001347 for (i = 0; i < page_count; i++) {
Hugh Dickins5949eac2011-06-27 16:18:18 -07001348 page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
Chris Wilsone5281cc2010-10-28 13:45:36 +01001349 if (IS_ERR(page))
1350 goto err_pages;
1351
Chris Wilson05394f32010-11-08 19:18:58 +00001352 obj->pages[i] = page;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001353 }
1354
Daniel Vetter6dacfd22011-09-12 21:30:02 +02001355 if (i915_gem_object_needs_bit17_swizzle(obj))
Chris Wilsone5281cc2010-10-28 13:45:36 +01001356 i915_gem_object_do_bit_17_swizzle(obj);
1357
1358 return 0;
1359
1360err_pages:
1361 while (i--)
Chris Wilson05394f32010-11-08 19:18:58 +00001362 page_cache_release(obj->pages[i]);
Chris Wilsone5281cc2010-10-28 13:45:36 +01001363
Chris Wilson05394f32010-11-08 19:18:58 +00001364 drm_free_large(obj->pages);
1365 obj->pages = NULL;
Chris Wilsone5281cc2010-10-28 13:45:36 +01001366 return PTR_ERR(page);
1367}
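/* Note on the error path above: only the pages obtained so far are
 * released (i counts back down from the failing index) before the page
 * array is freed, leaving the object exactly as it was before the call.
 */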
1368
Chris Wilson5cdf5882010-09-27 15:51:07 +01001369static void
Chris Wilson05394f32010-11-08 19:18:58 +00001370i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001371{
Chris Wilson05394f32010-11-08 19:18:58 +00001372 int page_count = obj->base.size / PAGE_SIZE;
Eric Anholt673a3942008-07-30 12:06:12 -07001373 int i;
1374
Chris Wilson05394f32010-11-08 19:18:58 +00001375 BUG_ON(obj->madv == __I915_MADV_PURGED);
Eric Anholt856fa192009-03-19 14:10:50 -07001376
Daniel Vetter6dacfd22011-09-12 21:30:02 +02001377 if (i915_gem_object_needs_bit17_swizzle(obj))
Eric Anholt280b7132009-03-12 16:56:27 -07001378 i915_gem_object_save_bit_17_swizzle(obj);
1379
Chris Wilson05394f32010-11-08 19:18:58 +00001380 if (obj->madv == I915_MADV_DONTNEED)
1381 obj->dirty = 0;
Chris Wilson3ef94da2009-09-14 16:50:29 +01001382
1383 for (i = 0; i < page_count; i++) {
Chris Wilson05394f32010-11-08 19:18:58 +00001384 if (obj->dirty)
1385 set_page_dirty(obj->pages[i]);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001386
Chris Wilson05394f32010-11-08 19:18:58 +00001387 if (obj->madv == I915_MADV_WILLNEED)
1388 mark_page_accessed(obj->pages[i]);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001389
Chris Wilson05394f32010-11-08 19:18:58 +00001390 page_cache_release(obj->pages[i]);
Chris Wilson3ef94da2009-09-14 16:50:29 +01001391 }
Chris Wilson05394f32010-11-08 19:18:58 +00001392 obj->dirty = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001393
Chris Wilson05394f32010-11-08 19:18:58 +00001394 drm_free_large(obj->pages);
1395 obj->pages = NULL;
Eric Anholt673a3942008-07-30 12:06:12 -07001396}
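/* Dirty handling above: objects marked DONTNEED have their dirty flag
 * cleared before the loop, so set_page_dirty() is never called for them
 * and the shmem backing store is free to drop the data instead of
 * writing it back.
 */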
1397
Chris Wilson54cf91d2010-11-25 18:00:26 +00001398void
Chris Wilson05394f32010-11-08 19:18:58 +00001399i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001400 struct intel_ring_buffer *ring,
1401 u32 seqno)
Eric Anholt673a3942008-07-30 12:06:12 -07001402{
Chris Wilson05394f32010-11-08 19:18:58 +00001403 struct drm_device *dev = obj->base.dev;
Chris Wilson69dc4982010-10-19 10:36:51 +01001404 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetter617dbe22010-02-11 22:16:02 +01001405
Zou Nan hai852835f2010-05-21 09:08:56 +08001406 BUG_ON(ring == NULL);
Chris Wilson05394f32010-11-08 19:18:58 +00001407 obj->ring = ring;
Eric Anholt673a3942008-07-30 12:06:12 -07001408
1409 /* Add a reference if we're newly entering the active list. */
Chris Wilson05394f32010-11-08 19:18:58 +00001410 if (!obj->active) {
1411 drm_gem_object_reference(&obj->base);
1412 obj->active = 1;
Eric Anholt673a3942008-07-30 12:06:12 -07001413 }
Daniel Vettere35a41d2010-02-11 22:13:59 +01001414
Eric Anholt673a3942008-07-30 12:06:12 -07001415 /* Move from whatever list we were on to the tail of execution. */
Chris Wilson05394f32010-11-08 19:18:58 +00001416 list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1417 list_move_tail(&obj->ring_list, &ring->active_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00001418
Chris Wilson05394f32010-11-08 19:18:58 +00001419 obj->last_rendering_seqno = seqno;
Chris Wilson7dd49062012-03-21 10:48:18 +00001420
Chris Wilsoncaea7472010-11-12 13:53:37 +00001421 if (obj->fenced_gpu_access) {
Chris Wilsoncaea7472010-11-12 13:53:37 +00001422 obj->last_fenced_seqno = seqno;
Chris Wilsoncaea7472010-11-12 13:53:37 +00001423
Chris Wilson7dd49062012-03-21 10:48:18 +00001424 /* Bump MRU to take account of the delayed flush */
1425 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1426 struct drm_i915_fence_reg *reg;
1427
1428 reg = &dev_priv->fence_regs[obj->fence_reg];
1429 list_move_tail(&reg->lru_list,
1430 &dev_priv->mm.fence_list);
1431 }
Chris Wilsoncaea7472010-11-12 13:53:37 +00001432 }
1433}
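/* The reference taken when an object first becomes active is what keeps
 * it alive while the GPU may still be reading it; the matching
 * unreference happens in i915_gem_object_move_to_inactive() below.
 */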
1434
1435static void
1436i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
1437{
1438 list_del_init(&obj->ring_list);
1439 obj->last_rendering_seqno = 0;
Daniel Vetter15a13bb2012-04-12 01:27:57 +02001440 obj->last_fenced_seqno = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001441}
1442
Eric Anholtce44b0e2008-11-06 16:00:31 -08001443static void
Chris Wilson05394f32010-11-08 19:18:58 +00001444i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
Eric Anholtce44b0e2008-11-06 16:00:31 -08001445{
Chris Wilson05394f32010-11-08 19:18:58 +00001446 struct drm_device *dev = obj->base.dev;
Eric Anholtce44b0e2008-11-06 16:00:31 -08001447 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholtce44b0e2008-11-06 16:00:31 -08001448
Chris Wilson05394f32010-11-08 19:18:58 +00001449 BUG_ON(!obj->active);
1450 list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00001451
1452 i915_gem_object_move_off_active(obj);
1453}
1454
1455static void
1456i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1457{
1458 struct drm_device *dev = obj->base.dev;
1459 struct drm_i915_private *dev_priv = dev->dev_private;
1460
Chris Wilson1b502472012-04-24 15:47:30 +01001461 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
Chris Wilsoncaea7472010-11-12 13:53:37 +00001462
1463 BUG_ON(!list_empty(&obj->gpu_write_list));
1464 BUG_ON(!obj->active);
1465 obj->ring = NULL;
1466
1467 i915_gem_object_move_off_active(obj);
1468 obj->fenced_gpu_access = false;
Chris Wilsoncaea7472010-11-12 13:53:37 +00001469
1470 obj->active = 0;
Chris Wilson87ca9c82010-12-02 09:42:56 +00001471 obj->pending_gpu_write = false;
Chris Wilsoncaea7472010-11-12 13:53:37 +00001472 drm_gem_object_unreference(&obj->base);
1473
1474 WARN_ON(i915_verify_lists(dev));
Eric Anholtce44b0e2008-11-06 16:00:31 -08001475}
Eric Anholt673a3942008-07-30 12:06:12 -07001476
Chris Wilson963b4832009-09-20 23:03:54 +01001477/* Immediately discard the backing storage */
1478static void
Chris Wilson05394f32010-11-08 19:18:58 +00001479i915_gem_object_truncate(struct drm_i915_gem_object *obj)
Chris Wilson963b4832009-09-20 23:03:54 +01001480{
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001481 struct inode *inode;
Chris Wilson963b4832009-09-20 23:03:54 +01001482
Chris Wilsonae9fed62010-08-07 11:01:30 +01001483	/* Our goal here is to return as much of the memory as possible
1484 * back to the system, since we may be called from the OOM path.
1485 * To do this we must instruct the shmfs to drop all of its
Hugh Dickinse2377fe2011-06-27 16:18:19 -07001486 * backing pages, *now*.
Chris Wilsonae9fed62010-08-07 11:01:30 +01001487 */
Chris Wilson05394f32010-11-08 19:18:58 +00001488 inode = obj->base.filp->f_path.dentry->d_inode;
Hugh Dickinse2377fe2011-06-27 16:18:19 -07001489 shmem_truncate_range(inode, 0, (loff_t)-1);
Chris Wilsonbb6baf72009-09-22 14:24:13 +01001490
Chris Wilsona14917e2012-02-24 21:13:38 +00001491 if (obj->base.map_list.map)
1492 drm_gem_free_mmap_offset(&obj->base);
1493
Chris Wilson05394f32010-11-08 19:18:58 +00001494 obj->madv = __I915_MADV_PURGED;
Chris Wilson963b4832009-09-20 23:03:54 +01001495}
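/* After this the backing pages are gone, the fake mmap offset (if any)
 * has been released, and the object is marked __I915_MADV_PURGED so the
 * rest of the driver knows its contents are lost for good.
 */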
1496
1497static inline int
Chris Wilson05394f32010-11-08 19:18:58 +00001498i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
Chris Wilson963b4832009-09-20 23:03:54 +01001499{
Chris Wilson05394f32010-11-08 19:18:58 +00001500 return obj->madv == I915_MADV_DONTNEED;
Chris Wilson963b4832009-09-20 23:03:54 +01001501}
1502
Eric Anholt673a3942008-07-30 12:06:12 -07001503static void
Chris Wilsondb53a302011-02-03 11:57:46 +00001504i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
1505 uint32_t flush_domains)
Daniel Vetter63560392010-02-19 11:51:59 +01001506{
Chris Wilson05394f32010-11-08 19:18:58 +00001507 struct drm_i915_gem_object *obj, *next;
Daniel Vetter63560392010-02-19 11:51:59 +01001508
Chris Wilson05394f32010-11-08 19:18:58 +00001509 list_for_each_entry_safe(obj, next,
Chris Wilson64193402010-10-24 12:38:05 +01001510 &ring->gpu_write_list,
Daniel Vetter63560392010-02-19 11:51:59 +01001511 gpu_write_list) {
Chris Wilson05394f32010-11-08 19:18:58 +00001512 if (obj->base.write_domain & flush_domains) {
1513 uint32_t old_write_domain = obj->base.write_domain;
Daniel Vetter63560392010-02-19 11:51:59 +01001514
Chris Wilson05394f32010-11-08 19:18:58 +00001515 obj->base.write_domain = 0;
1516 list_del_init(&obj->gpu_write_list);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001517 i915_gem_object_move_to_active(obj, ring,
Chris Wilsondb53a302011-02-03 11:57:46 +00001518 i915_gem_next_request_seqno(ring));
Daniel Vetter63560392010-02-19 11:51:59 +01001519
Daniel Vetter63560392010-02-19 11:51:59 +01001520 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00001521 obj->base.read_domains,
Daniel Vetter63560392010-02-19 11:51:59 +01001522 old_write_domain);
1523 }
1524 }
1525}
Zou Nan hai8187a2b2010-05-21 09:08:55 +08001526
Daniel Vetter53d227f2012-01-25 16:32:49 +01001527static u32
1528i915_gem_get_seqno(struct drm_device *dev)
1529{
1530 drm_i915_private_t *dev_priv = dev->dev_private;
1531 u32 seqno = dev_priv->next_seqno;
1532
1533 /* reserve 0 for non-seqno */
1534 if (++dev_priv->next_seqno == 0)
1535 dev_priv->next_seqno = 1;
1536
1537 return seqno;
1538}
1539
1540u32
1541i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
1542{
1543 if (ring->outstanding_lazy_request == 0)
1544 ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
1545
1546 return ring->outstanding_lazy_request;
1547}
1548
Chris Wilson3cce4692010-10-27 16:11:02 +01001549int
Chris Wilsondb53a302011-02-03 11:57:46 +00001550i915_add_request(struct intel_ring_buffer *ring,
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001551 struct drm_file *file,
Chris Wilsondb53a302011-02-03 11:57:46 +00001552 struct drm_i915_gem_request *request)
Eric Anholt673a3942008-07-30 12:06:12 -07001553{
Chris Wilsondb53a302011-02-03 11:57:46 +00001554 drm_i915_private_t *dev_priv = ring->dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07001555 uint32_t seqno;
Chris Wilsona71d8d92012-02-15 11:25:36 +00001556 u32 request_ring_position;
Eric Anholt673a3942008-07-30 12:06:12 -07001557 int was_empty;
Chris Wilson3cce4692010-10-27 16:11:02 +01001558 int ret;
1559
1560 BUG_ON(request == NULL);
Daniel Vetter53d227f2012-01-25 16:32:49 +01001561 seqno = i915_gem_next_request_seqno(ring);
Eric Anholt673a3942008-07-30 12:06:12 -07001562
Chris Wilsona71d8d92012-02-15 11:25:36 +00001563 /* Record the position of the start of the request so that
1564 * should we detect the updated seqno part-way through the
1565 * GPU processing the request, we never over-estimate the
1566 * position of the head.
1567 */
1568 request_ring_position = intel_ring_get_tail(ring);
1569
Chris Wilson3cce4692010-10-27 16:11:02 +01001570 ret = ring->add_request(ring, &seqno);
1571 if (ret)
1572 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07001573
Chris Wilsondb53a302011-02-03 11:57:46 +00001574 trace_i915_gem_request_add(ring, seqno);
Eric Anholt673a3942008-07-30 12:06:12 -07001575
1576 request->seqno = seqno;
Zou Nan hai852835f2010-05-21 09:08:56 +08001577 request->ring = ring;
Chris Wilsona71d8d92012-02-15 11:25:36 +00001578 request->tail = request_ring_position;
Eric Anholt673a3942008-07-30 12:06:12 -07001579 request->emitted_jiffies = jiffies;
Zou Nan hai852835f2010-05-21 09:08:56 +08001580 was_empty = list_empty(&ring->request_list);
1581 list_add_tail(&request->list, &ring->request_list);
1582
Chris Wilsondb53a302011-02-03 11:57:46 +00001583 if (file) {
1584 struct drm_i915_file_private *file_priv = file->driver_priv;
1585
Chris Wilson1c255952010-09-26 11:03:27 +01001586 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001587 request->file_priv = file_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00001588 list_add_tail(&request->client_list,
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001589 &file_priv->mm.request_list);
Chris Wilson1c255952010-09-26 11:03:27 +01001590 spin_unlock(&file_priv->mm.lock);
Eric Anholtb9624422009-06-03 07:27:35 +00001591 }
Eric Anholt673a3942008-07-30 12:06:12 -07001592
Daniel Vetter5391d0c2012-01-25 14:03:57 +01001593 ring->outstanding_lazy_request = 0;
Chris Wilsondb53a302011-02-03 11:57:46 +00001594
Ben Gamarif65d9422009-09-14 17:48:44 -04001595 if (!dev_priv->mm.suspended) {
Ben Widawsky3e0dc6b2011-06-29 10:26:42 -07001596 if (i915_enable_hangcheck) {
1597 mod_timer(&dev_priv->hangcheck_timer,
1598 jiffies +
1599 msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
1600 }
Ben Gamarif65d9422009-09-14 17:48:44 -04001601 if (was_empty)
Chris Wilsonb3b079d2010-09-13 23:44:34 +01001602 queue_delayed_work(dev_priv->wq,
1603 &dev_priv->mm.retire_work, HZ);
Ben Gamarif65d9422009-09-14 17:48:44 -04001604 }
Chris Wilson3cce4692010-10-27 16:11:02 +01001605 return 0;
Eric Anholt673a3942008-07-30 12:06:12 -07001606}
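/* Each request thus records its seqno, the ring it was emitted on, the
 * ring tail at emission time and a jiffies timestamp; if the device is
 * not suspended the hangcheck timer is re-armed and, if the ring was
 * previously idle, the retire worker is kicked as well.
 */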
1607
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001608static inline void
1609i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
Eric Anholt673a3942008-07-30 12:06:12 -07001610{
Chris Wilson1c255952010-09-26 11:03:27 +01001611 struct drm_i915_file_private *file_priv = request->file_priv;
Eric Anholt673a3942008-07-30 12:06:12 -07001612
Chris Wilson1c255952010-09-26 11:03:27 +01001613 if (!file_priv)
1614 return;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001615
Chris Wilson1c255952010-09-26 11:03:27 +01001616 spin_lock(&file_priv->mm.lock);
Herton Ronaldo Krzesinski09bfa512011-03-17 13:45:12 +00001617 if (request->file_priv) {
1618 list_del(&request->client_list);
1619 request->file_priv = NULL;
1620 }
Chris Wilson1c255952010-09-26 11:03:27 +01001621 spin_unlock(&file_priv->mm.lock);
Eric Anholt673a3942008-07-30 12:06:12 -07001622}
1623
Chris Wilsondfaae392010-09-22 10:31:52 +01001624static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
1625 struct intel_ring_buffer *ring)
Chris Wilson9375e442010-09-19 12:21:28 +01001626{
Chris Wilsondfaae392010-09-22 10:31:52 +01001627 while (!list_empty(&ring->request_list)) {
1628 struct drm_i915_gem_request *request;
Chris Wilson9375e442010-09-19 12:21:28 +01001629
Chris Wilsondfaae392010-09-22 10:31:52 +01001630 request = list_first_entry(&ring->request_list,
1631 struct drm_i915_gem_request,
1632 list);
1633
1634 list_del(&request->list);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001635 i915_gem_request_remove_from_client(request);
Chris Wilsondfaae392010-09-22 10:31:52 +01001636 kfree(request);
1637 }
1638
1639 while (!list_empty(&ring->active_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00001640 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07001641
Chris Wilson05394f32010-11-08 19:18:58 +00001642 obj = list_first_entry(&ring->active_list,
1643 struct drm_i915_gem_object,
1644 ring_list);
Eric Anholt673a3942008-07-30 12:06:12 -07001645
Chris Wilson05394f32010-11-08 19:18:58 +00001646 obj->base.write_domain = 0;
1647 list_del_init(&obj->gpu_write_list);
1648 i915_gem_object_move_to_inactive(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001649 }
Eric Anholt673a3942008-07-30 12:06:12 -07001650}
1651
Chris Wilson312817a2010-11-22 11:50:11 +00001652static void i915_gem_reset_fences(struct drm_device *dev)
1653{
1654 struct drm_i915_private *dev_priv = dev->dev_private;
1655 int i;
1656
Daniel Vetter4b9de732011-10-09 21:52:02 +02001657 for (i = 0; i < dev_priv->num_fence_regs; i++) {
Chris Wilson312817a2010-11-22 11:50:11 +00001658 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
Chris Wilson7d2cb392010-11-27 17:38:29 +00001659
Chris Wilsonada726c2012-04-17 15:31:32 +01001660 i915_gem_write_fence(dev, i, NULL);
Chris Wilson7d2cb392010-11-27 17:38:29 +00001661
Chris Wilsonada726c2012-04-17 15:31:32 +01001662 if (reg->obj)
1663 i915_gem_object_fence_lost(reg->obj);
Chris Wilson7d2cb392010-11-27 17:38:29 +00001664
Chris Wilsonada726c2012-04-17 15:31:32 +01001665 reg->pin_count = 0;
1666 reg->obj = NULL;
1667 INIT_LIST_HEAD(&reg->lru_list);
Chris Wilson312817a2010-11-22 11:50:11 +00001668 }
Chris Wilsonada726c2012-04-17 15:31:32 +01001669
1670 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
Chris Wilson312817a2010-11-22 11:50:11 +00001671}
1672
Chris Wilson069efc12010-09-30 16:53:18 +01001673void i915_gem_reset(struct drm_device *dev)
Eric Anholt673a3942008-07-30 12:06:12 -07001674{
Chris Wilsondfaae392010-09-22 10:31:52 +01001675 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson05394f32010-11-08 19:18:58 +00001676 struct drm_i915_gem_object *obj;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001677 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07001678
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001679 for (i = 0; i < I915_NUM_RINGS; i++)
1680 i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);
Chris Wilsondfaae392010-09-22 10:31:52 +01001681
1682 /* Remove anything from the flushing lists. The GPU cache is likely
1683 * to be lost on reset along with the data, so simply move the
1684 * lost bo to the inactive list.
1685 */
1686 while (!list_empty(&dev_priv->mm.flushing_list)) {
Akshay Joshi0206e352011-08-16 15:34:10 -04001687 obj = list_first_entry(&dev_priv->mm.flushing_list,
Chris Wilson05394f32010-11-08 19:18:58 +00001688 struct drm_i915_gem_object,
1689 mm_list);
Chris Wilson9375e442010-09-19 12:21:28 +01001690
Chris Wilson05394f32010-11-08 19:18:58 +00001691 obj->base.write_domain = 0;
1692 list_del_init(&obj->gpu_write_list);
1693 i915_gem_object_move_to_inactive(obj);
Chris Wilson9375e442010-09-19 12:21:28 +01001694 }
Chris Wilson9375e442010-09-19 12:21:28 +01001695
Chris Wilsondfaae392010-09-22 10:31:52 +01001696 /* Move everything out of the GPU domains to ensure we do any
1697 * necessary invalidation upon reuse.
1698 */
Chris Wilson05394f32010-11-08 19:18:58 +00001699 list_for_each_entry(obj,
Chris Wilson77f01232010-09-19 12:31:36 +01001700 &dev_priv->mm.inactive_list,
Chris Wilson69dc4982010-10-19 10:36:51 +01001701 mm_list)
Chris Wilson77f01232010-09-19 12:31:36 +01001702 {
Chris Wilson05394f32010-11-08 19:18:58 +00001703 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
Chris Wilson77f01232010-09-19 12:31:36 +01001704 }
Chris Wilson069efc12010-09-30 16:53:18 +01001705
1706 /* The fence registers are invalidated so clear them out */
Chris Wilson312817a2010-11-22 11:50:11 +00001707 i915_gem_reset_fences(dev);
Eric Anholt673a3942008-07-30 12:06:12 -07001708}
1709
1710/**
1711 * This function clears the request list as sequence numbers are passed.
1712 */
Chris Wilsona71d8d92012-02-15 11:25:36 +00001713void
Chris Wilsondb53a302011-02-03 11:57:46 +00001714i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
Eric Anholt673a3942008-07-30 12:06:12 -07001715{
Eric Anholt673a3942008-07-30 12:06:12 -07001716 uint32_t seqno;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001717 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07001718
Chris Wilsondb53a302011-02-03 11:57:46 +00001719 if (list_empty(&ring->request_list))
Karsten Wiese6c0594a2009-02-23 15:07:57 +01001720 return;
1721
Chris Wilsondb53a302011-02-03 11:57:46 +00001722 WARN_ON(i915_verify_lists(ring->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07001723
Chris Wilson78501ea2010-10-27 12:18:21 +01001724 seqno = ring->get_seqno(ring);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001725
Chris Wilson076e2c02011-01-21 10:07:18 +00001726 for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001727 if (seqno >= ring->sync_seqno[i])
1728 ring->sync_seqno[i] = 0;
1729
Zou Nan hai852835f2010-05-21 09:08:56 +08001730 while (!list_empty(&ring->request_list)) {
Eric Anholt673a3942008-07-30 12:06:12 -07001731 struct drm_i915_gem_request *request;
Eric Anholt673a3942008-07-30 12:06:12 -07001732
Zou Nan hai852835f2010-05-21 09:08:56 +08001733 request = list_first_entry(&ring->request_list,
Eric Anholt673a3942008-07-30 12:06:12 -07001734 struct drm_i915_gem_request,
1735 list);
Eric Anholt673a3942008-07-30 12:06:12 -07001736
Chris Wilsondfaae392010-09-22 10:31:52 +01001737 if (!i915_seqno_passed(seqno, request->seqno))
Eric Anholt673a3942008-07-30 12:06:12 -07001738 break;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001739
Chris Wilsondb53a302011-02-03 11:57:46 +00001740 trace_i915_gem_request_retire(ring, request->seqno);
Chris Wilsona71d8d92012-02-15 11:25:36 +00001741 /* We know the GPU must have read the request to have
1742 * sent us the seqno + interrupt, so use the position
1743 * of the tail of the request to update the last known position
1744 * of the GPU head.
1745 */
1746 ring->last_retired_head = request->tail;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001747
1748 list_del(&request->list);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01001749 i915_gem_request_remove_from_client(request);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001750 kfree(request);
1751 }
1752
1753 /* Move any buffers on the active list that are no longer referenced
1754 * by the ringbuffer to the flushing/inactive lists as appropriate.
1755 */
1756 while (!list_empty(&ring->active_list)) {
Chris Wilson05394f32010-11-08 19:18:58 +00001757 struct drm_i915_gem_object *obj;
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001758
Akshay Joshi0206e352011-08-16 15:34:10 -04001759 obj = list_first_entry(&ring->active_list,
Chris Wilson05394f32010-11-08 19:18:58 +00001760 struct drm_i915_gem_object,
1761 ring_list);
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001762
Chris Wilson05394f32010-11-08 19:18:58 +00001763 if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001764 break;
1765
Chris Wilson05394f32010-11-08 19:18:58 +00001766 if (obj->base.write_domain != 0)
Chris Wilsonb84d5f02010-09-18 01:38:04 +01001767 i915_gem_object_move_to_flushing(obj);
1768 else
1769 i915_gem_object_move_to_inactive(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07001770 }
Chris Wilson9d34e5d2009-09-24 05:26:06 +01001771
Chris Wilsondb53a302011-02-03 11:57:46 +00001772 if (unlikely(ring->trace_irq_seqno &&
1773 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001774 ring->irq_put(ring);
Chris Wilsondb53a302011-02-03 11:57:46 +00001775 ring->trace_irq_seqno = 0;
Chris Wilson9d34e5d2009-09-24 05:26:06 +01001776 }
Chris Wilson23bc5982010-09-29 16:10:57 +01001777
Chris Wilsondb53a302011-02-03 11:57:46 +00001778 WARN_ON(i915_verify_lists(ring->dev));
Eric Anholt673a3942008-07-30 12:06:12 -07001779}
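/* Retiring runs in two passes: completed requests are unlinked and freed
 * first (updating last_retired_head along the way), then objects whose
 * last_rendering_seqno has passed move to the flushing or inactive list
 * depending on whether they still have a pending GPU write domain.
 */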
1780
1781void
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001782i915_gem_retire_requests(struct drm_device *dev)
1783{
1784 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001785 int i;
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001786
Chris Wilson1ec14ad2010-12-04 11:30:53 +00001787 for (i = 0; i < I915_NUM_RINGS; i++)
Chris Wilsondb53a302011-02-03 11:57:46 +00001788 i915_gem_retire_requests_ring(&dev_priv->ring[i]);
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001789}
1790
Daniel Vetter75ef9da2010-08-21 00:25:16 +02001791static void
Eric Anholt673a3942008-07-30 12:06:12 -07001792i915_gem_retire_work_handler(struct work_struct *work)
1793{
1794 drm_i915_private_t *dev_priv;
1795 struct drm_device *dev;
Chris Wilson0a587052011-01-09 21:05:44 +00001796 bool idle;
1797 int i;
Eric Anholt673a3942008-07-30 12:06:12 -07001798
1799 dev_priv = container_of(work, drm_i915_private_t,
1800 mm.retire_work.work);
1801 dev = dev_priv->dev;
1802
Chris Wilson891b48c2010-09-29 12:26:37 +01001803 /* Come back later if the device is busy... */
1804 if (!mutex_trylock(&dev->struct_mutex)) {
1805 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1806 return;
1807 }
1808
Chris Wilsonb09a1fe2010-07-23 23:18:49 +01001809 i915_gem_retire_requests(dev);
Zou Nan haid1b851f2010-05-21 09:08:57 +08001810
Chris Wilson0a587052011-01-09 21:05:44 +00001811 /* Send a periodic flush down the ring so we don't hold onto GEM
1812 * objects indefinitely.
1813 */
1814 idle = true;
1815 for (i = 0; i < I915_NUM_RINGS; i++) {
1816 struct intel_ring_buffer *ring = &dev_priv->ring[i];
1817
1818 if (!list_empty(&ring->gpu_write_list)) {
1819 struct drm_i915_gem_request *request;
1820 int ret;
1821
Chris Wilsondb53a302011-02-03 11:57:46 +00001822 ret = i915_gem_flush_ring(ring,
1823 0, I915_GEM_GPU_DOMAINS);
Chris Wilson0a587052011-01-09 21:05:44 +00001824 request = kzalloc(sizeof(*request), GFP_KERNEL);
1825 if (ret || request == NULL ||
Chris Wilsondb53a302011-02-03 11:57:46 +00001826 i915_add_request(ring, NULL, request))
Chris Wilson0a587052011-01-09 21:05:44 +00001827 kfree(request);
1828 }
1829
1830 idle &= list_empty(&ring->request_list);
1831 }
1832
1833 if (!dev_priv->mm.suspended && !idle)
Eric Anholt9c9fe1f2009-08-03 16:09:16 -07001834 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
Chris Wilson0a587052011-01-09 21:05:44 +00001835
Eric Anholt673a3942008-07-30 12:06:12 -07001836 mutex_unlock(&dev->struct_mutex);
1837}
1838
Chris Wilsondb53a302011-02-03 11:57:46 +00001839/**
1840 * Waits for a sequence number to be signaled, and cleans up the
1841 * request and object lists appropriately for that event.
1842 */
Daniel Vetter5a5a0c62009-09-15 22:57:36 +02001843int
Chris Wilsondb53a302011-02-03 11:57:46 +00001844i915_wait_request(struct intel_ring_buffer *ring,
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08001845 uint32_t seqno,
1846 bool do_retire)
Eric Anholt673a3942008-07-30 12:06:12 -07001847{
Chris Wilsondb53a302011-02-03 11:57:46 +00001848 drm_i915_private_t *dev_priv = ring->dev->dev_private;
Jesse Barnes802c7eb2009-05-05 16:03:48 -07001849 u32 ier;
Eric Anholt673a3942008-07-30 12:06:12 -07001850 int ret = 0;
1851
1852 BUG_ON(seqno == 0);
1853
Chris Wilsond9bc7e92011-02-07 13:09:31 +00001854 if (atomic_read(&dev_priv->mm.wedged)) {
1855 struct completion *x = &dev_priv->error_completion;
1856 bool recovery_complete;
1857 unsigned long flags;
1858
1859 /* Give the error handler a chance to run. */
1860 spin_lock_irqsave(&x->wait.lock, flags);
1861 recovery_complete = x->done > 0;
1862 spin_unlock_irqrestore(&x->wait.lock, flags);
1863
1864 return recovery_complete ? -EIO : -EAGAIN;
1865 }
Ben Gamariffed1d02009-09-14 17:48:41 -04001866
Chris Wilson5d97eb62010-11-10 20:40:02 +00001867 if (seqno == ring->outstanding_lazy_request) {
Chris Wilson3cce4692010-10-27 16:11:02 +01001868 struct drm_i915_gem_request *request;
1869
1870 request = kzalloc(sizeof(*request), GFP_KERNEL);
1871 if (request == NULL)
Daniel Vettere35a41d2010-02-11 22:13:59 +01001872 return -ENOMEM;
Chris Wilson3cce4692010-10-27 16:11:02 +01001873
Chris Wilsondb53a302011-02-03 11:57:46 +00001874 ret = i915_add_request(ring, NULL, request);
Chris Wilson3cce4692010-10-27 16:11:02 +01001875 if (ret) {
1876 kfree(request);
1877 return ret;
1878 }
1879
1880 seqno = request->seqno;
Daniel Vettere35a41d2010-02-11 22:13:59 +01001881 }
1882
Chris Wilson78501ea2010-10-27 12:18:21 +01001883 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
Chris Wilsondb53a302011-02-03 11:57:46 +00001884 if (HAS_PCH_SPLIT(ring->dev))
Zhenyu Wang036a4a72009-06-08 14:40:19 +08001885 ier = I915_READ(DEIER) | I915_READ(GTIER);
Jesse Barnes23e3f9b2012-03-28 13:39:39 -07001886 else if (IS_VALLEYVIEW(ring->dev))
1887 ier = I915_READ(GTIER) | I915_READ(VLV_IER);
Zhenyu Wang036a4a72009-06-08 14:40:19 +08001888 else
1889 ier = I915_READ(IER);
Jesse Barnes802c7eb2009-05-05 16:03:48 -07001890 if (!ier) {
1891 DRM_ERROR("something (likely vbetool) disabled "
1892 "interrupts, re-enabling\n");
Chris Wilsonf01c22f2011-06-28 11:48:51 +01001893 ring->dev->driver->irq_preinstall(ring->dev);
1894 ring->dev->driver->irq_postinstall(ring->dev);
Jesse Barnes802c7eb2009-05-05 16:03:48 -07001895 }
1896
Chris Wilsondb53a302011-02-03 11:57:46 +00001897 trace_i915_gem_request_wait_begin(ring, seqno);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001898
Chris Wilsonb2223492010-10-27 15:27:33 +01001899 ring->waiting_seqno = seqno;
Chris Wilsonb13c2b92010-12-13 16:54:50 +00001900 if (ring->irq_get(ring)) {
Chris Wilsonce453d82011-02-21 14:43:56 +00001901 if (dev_priv->mm.interruptible)
Chris Wilsonb13c2b92010-12-13 16:54:50 +00001902 ret = wait_event_interruptible(ring->irq_queue,
1903 i915_seqno_passed(ring->get_seqno(ring), seqno)
1904 || atomic_read(&dev_priv->mm.wedged));
1905 else
1906 wait_event(ring->irq_queue,
1907 i915_seqno_passed(ring->get_seqno(ring), seqno)
1908 || atomic_read(&dev_priv->mm.wedged));
Daniel Vetter48764bf2009-09-15 22:57:32 +02001909
Chris Wilsonb13c2b92010-12-13 16:54:50 +00001910 ring->irq_put(ring);
Eric Anholte959b5d2011-12-22 14:55:01 -08001911 } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
1912 seqno) ||
1913 atomic_read(&dev_priv->mm.wedged), 3000))
Chris Wilsonb5ba1772010-12-14 12:17:15 +00001914 ret = -EBUSY;
Chris Wilsonb2223492010-10-27 15:27:33 +01001915 ring->waiting_seqno = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01001916
Chris Wilsondb53a302011-02-03 11:57:46 +00001917 trace_i915_gem_request_wait_end(ring, seqno);
Eric Anholt673a3942008-07-30 12:06:12 -07001918 }
Ben Gamariba1234d2009-09-14 17:48:47 -04001919 if (atomic_read(&dev_priv->mm.wedged))
Chris Wilson30dbf0c2010-09-25 10:19:17 +01001920 ret = -EAGAIN;
Eric Anholt673a3942008-07-30 12:06:12 -07001921
Eric Anholt673a3942008-07-30 12:06:12 -07001922 /* Directly dispatch request retiring. While we have the work queue
1923 * to handle this, the waiter on a request often wants an associated
1924 * buffer to have made it to the inactive list, and we would need
1925 * a separate wait queue to handle that.
1926 */
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08001927 if (ret == 0 && do_retire)
Chris Wilsondb53a302011-02-03 11:57:46 +00001928 i915_gem_retire_requests_ring(ring);
Eric Anholt673a3942008-07-30 12:06:12 -07001929
1930 return ret;
1931}
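/* Note the lazy-request handling above: waiting on the ring's
 * outstanding lazy request first forces that request to be emitted, so
 * the seqno being waited upon is guaranteed to reach the hardware.
 */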
1932
Daniel Vetter48764bf2009-09-15 22:57:32 +02001933/**
Eric Anholt673a3942008-07-30 12:06:12 -07001934 * Ensures that all rendering to the object has completed and the object is
1935 * safe to unbind from the GTT or access from the CPU.
1936 */
Chris Wilson54cf91d2010-11-25 18:00:26 +00001937int
Chris Wilsonce453d82011-02-21 14:43:56 +00001938i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07001939{
Eric Anholt673a3942008-07-30 12:06:12 -07001940 int ret;
1941
Eric Anholte47c68e2008-11-14 13:35:19 -08001942 /* This function only exists to support waiting for existing rendering,
1943 * not for emitting required flushes.
Eric Anholt673a3942008-07-30 12:06:12 -07001944 */
Chris Wilson05394f32010-11-08 19:18:58 +00001945 BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
Eric Anholt673a3942008-07-30 12:06:12 -07001946
1947 /* If there is rendering queued on the buffer being evicted, wait for
1948 * it.
1949 */
Chris Wilson05394f32010-11-08 19:18:58 +00001950 if (obj->active) {
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08001951 ret = i915_wait_request(obj->ring, obj->last_rendering_seqno,
1952 true);
Chris Wilson2cf34d72010-09-14 13:03:28 +01001953 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07001954 return ret;
1955 }
1956
1957 return 0;
1958}
1959
Ben Widawsky5816d642012-04-11 11:18:19 -07001960/**
1961 * i915_gem_object_sync - sync an object to a ring.
1962 *
1963 * @obj: object which may be in use on another ring.
1964 * @to: ring we wish to use the object on. May be NULL.
1965 *
1966 * This code is meant to abstract object synchronization with the GPU.
1967 * Calling with NULL implies synchronizing the object with the CPU
1968 * rather than a particular GPU ring.
1969 *
1970 * Returns 0 if successful, else propagates up the lower layer error.
1971 */
Ben Widawsky2911a352012-04-05 14:47:36 -07001972int
1973i915_gem_object_sync(struct drm_i915_gem_object *obj,
1974 struct intel_ring_buffer *to)
1975{
1976 struct intel_ring_buffer *from = obj->ring;
1977 u32 seqno;
1978 int ret, idx;
1979
1980 if (from == NULL || to == from)
1981 return 0;
1982
Ben Widawsky5816d642012-04-11 11:18:19 -07001983 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
Ben Widawsky2911a352012-04-05 14:47:36 -07001984 return i915_gem_object_wait_rendering(obj);
1985
1986 idx = intel_ring_sync_index(from, to);
1987
1988 seqno = obj->last_rendering_seqno;
1989 if (seqno <= from->sync_seqno[idx])
1990 return 0;
1991
1992 if (seqno == from->outstanding_lazy_request) {
1993 struct drm_i915_gem_request *request;
1994
1995 request = kzalloc(sizeof(*request), GFP_KERNEL);
1996 if (request == NULL)
1997 return -ENOMEM;
1998
1999 ret = i915_add_request(from, NULL, request);
2000 if (ret) {
2001 kfree(request);
2002 return ret;
2003 }
2004
2005 seqno = request->seqno;
2006 }
2007
Ben Widawsky2911a352012-04-05 14:47:36 -07002008
Ben Widawsky1500f7e2012-04-11 11:18:21 -07002009 ret = to->sync_to(to, from, seqno);
Ben Widawskye3a5a222012-04-11 11:18:20 -07002010 if (!ret)
2011 from->sync_seqno[idx] = seqno;
Ben Widawsky2911a352012-04-05 14:47:36 -07002012
Ben Widawskye3a5a222012-04-11 11:18:20 -07002013 return ret;
Ben Widawsky2911a352012-04-05 14:47:36 -07002014}
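/* The sync_seqno[] cache avoids emitting a semaphore wait for every
 * ring-to-ring handover: once the target ring has synced to a given
 * seqno from another ring, all older rendering on that ring is
 * implicitly covered.
 */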
2015
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002016static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2017{
2018 u32 old_write_domain, old_read_domains;
2019
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002020	/* Act as a barrier for all accesses through the GTT */
2021 mb();
2022
2023 /* Force a pagefault for domain tracking on next user access */
2024 i915_gem_release_mmap(obj);
2025
Keith Packardb97c3d92011-06-24 21:02:59 -07002026 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2027 return;
2028
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002029 old_read_domains = obj->base.read_domains;
2030 old_write_domain = obj->base.write_domain;
2031
2032 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2033 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2034
2035 trace_i915_gem_object_change_domain(obj,
2036 old_read_domains,
2037 old_write_domain);
2038}
2039
Eric Anholt673a3942008-07-30 12:06:12 -07002040/**
2041 * Unbinds an object from the GTT aperture.
2042 */
Jesse Barnes0f973f22009-01-26 17:10:45 -08002043int
Chris Wilson05394f32010-11-08 19:18:58 +00002044i915_gem_object_unbind(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07002045{
Daniel Vetter7bddb012012-02-09 17:15:47 +01002046 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07002047 int ret = 0;
2048
Chris Wilson05394f32010-11-08 19:18:58 +00002049 if (obj->gtt_space == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07002050 return 0;
2051
Chris Wilson05394f32010-11-08 19:18:58 +00002052 if (obj->pin_count != 0) {
Eric Anholt673a3942008-07-30 12:06:12 -07002053 DRM_ERROR("Attempting to unbind pinned buffer\n");
2054 return -EINVAL;
2055 }
2056
Chris Wilsona8198ee2011-04-13 22:04:09 +01002057 ret = i915_gem_object_finish_gpu(obj);
Chris Wilson1488fc02012-04-24 15:47:31 +01002058 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07002059 return ret;
Chris Wilson8dc17752010-07-23 23:18:51 +01002060	/* Continue on if we fail due to EIO: the GPU is hung, so we
2061 * should be safe, and we need to clean up or else we might
2062 * cause memory corruption through use-after-free.
2063 */
Chris Wilsona8198ee2011-04-13 22:04:09 +01002064
Chris Wilsonb5ffc9b2011-04-13 22:06:03 +01002065 i915_gem_object_finish_gtt(obj);
Chris Wilsona8198ee2011-04-13 22:04:09 +01002066
2067 /* Move the object to the CPU domain to ensure that
2068 * any possible CPU writes while it's not in the GTT
2069 * are flushed when we go to remap it.
2070 */
2071 if (ret == 0)
2072 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
2073 if (ret == -ERESTARTSYS)
2074 return ret;
Chris Wilson812ed4922010-09-30 15:08:57 +01002075 if (ret) {
Chris Wilsona8198ee2011-04-13 22:04:09 +01002076 /* In the event of a disaster, abandon all caches and
2077 * hope for the best.
2078 */
Chris Wilson812ed4922010-09-30 15:08:57 +01002079 i915_gem_clflush_object(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00002080 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
Chris Wilson812ed4922010-09-30 15:08:57 +01002081 }
Eric Anholt673a3942008-07-30 12:06:12 -07002082
Daniel Vetter96b47b62009-12-15 17:50:00 +01002083 /* release the fence reg _after_ flushing */
Chris Wilsond9e86c02010-11-10 16:40:20 +00002084 ret = i915_gem_object_put_fence(obj);
Chris Wilson1488fc02012-04-24 15:47:31 +01002085 if (ret)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002086 return ret;
Daniel Vetter96b47b62009-12-15 17:50:00 +01002087
Chris Wilsondb53a302011-02-03 11:57:46 +00002088 trace_i915_gem_object_unbind(obj);
2089
Daniel Vetter74898d72012-02-15 23:50:22 +01002090 if (obj->has_global_gtt_mapping)
2091 i915_gem_gtt_unbind_object(obj);
Daniel Vetter7bddb012012-02-09 17:15:47 +01002092 if (obj->has_aliasing_ppgtt_mapping) {
2093 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2094 obj->has_aliasing_ppgtt_mapping = 0;
2095 }
Daniel Vetter74163902012-02-15 23:50:21 +01002096 i915_gem_gtt_finish_object(obj);
Daniel Vetter7bddb012012-02-09 17:15:47 +01002097
Chris Wilsone5281cc2010-10-28 13:45:36 +01002098 i915_gem_object_put_pages_gtt(obj);
Eric Anholt673a3942008-07-30 12:06:12 -07002099
Chris Wilson6299f992010-11-24 12:23:44 +00002100 list_del_init(&obj->gtt_list);
Chris Wilson05394f32010-11-08 19:18:58 +00002101 list_del_init(&obj->mm_list);
Daniel Vetter75e9e912010-11-04 17:11:09 +01002102 /* Avoid an unnecessary call to unbind on rebind. */
Chris Wilson05394f32010-11-08 19:18:58 +00002103 obj->map_and_fenceable = true;
Eric Anholt673a3942008-07-30 12:06:12 -07002104
Chris Wilson05394f32010-11-08 19:18:58 +00002105 drm_mm_put_block(obj->gtt_space);
2106 obj->gtt_space = NULL;
2107 obj->gtt_offset = 0;
Eric Anholt673a3942008-07-30 12:06:12 -07002108
Chris Wilson05394f32010-11-08 19:18:58 +00002109 if (i915_gem_object_is_purgeable(obj))
Chris Wilson963b4832009-09-20 23:03:54 +01002110 i915_gem_object_truncate(obj);
2111
Chris Wilson8dc17752010-07-23 23:18:51 +01002112 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002113}
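/* Unbind ordering matters: GPU access is finished first, then the GTT
 * domain (forcing a fresh pagefault for domain tracking), then the fence
 * is released, and only once the PTEs have been torn down are the
 * backing pages dropped.
 */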
2114
Chris Wilson88241782011-01-07 17:09:48 +00002115int
Chris Wilsondb53a302011-02-03 11:57:46 +00002116i915_gem_flush_ring(struct intel_ring_buffer *ring,
Chris Wilson54cf91d2010-11-25 18:00:26 +00002117 uint32_t invalidate_domains,
2118 uint32_t flush_domains)
2119{
Chris Wilson88241782011-01-07 17:09:48 +00002120 int ret;
2121
Chris Wilson36d527d2011-03-19 22:26:49 +00002122 if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
2123 return 0;
2124
Chris Wilsondb53a302011-02-03 11:57:46 +00002125 trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
2126
Chris Wilson88241782011-01-07 17:09:48 +00002127 ret = ring->flush(ring, invalidate_domains, flush_domains);
2128 if (ret)
2129 return ret;
2130
Chris Wilson36d527d2011-03-19 22:26:49 +00002131 if (flush_domains & I915_GEM_GPU_DOMAINS)
2132 i915_gem_process_flushing_list(ring, flush_domains);
2133
Chris Wilson88241782011-01-07 17:09:48 +00002134 return 0;
Chris Wilson54cf91d2010-11-25 18:00:26 +00002135}
2136
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08002137static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
Chris Wilsona56ba562010-09-28 10:07:56 +01002138{
Chris Wilson88241782011-01-07 17:09:48 +00002139 int ret;
2140
Chris Wilson395b70b2010-10-28 21:28:46 +01002141 if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
Chris Wilson64193402010-10-24 12:38:05 +01002142 return 0;
2143
Chris Wilson88241782011-01-07 17:09:48 +00002144 if (!list_empty(&ring->gpu_write_list)) {
Chris Wilsondb53a302011-02-03 11:57:46 +00002145 ret = i915_gem_flush_ring(ring,
Chris Wilson0ac74c62010-12-06 14:36:02 +00002146 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
Chris Wilson88241782011-01-07 17:09:48 +00002147 if (ret)
2148 return ret;
2149 }
2150
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08002151 return i915_wait_request(ring, i915_gem_next_request_seqno(ring),
2152 do_retire);
Chris Wilsona56ba562010-09-28 10:07:56 +01002153}
2154
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08002155int i915_gpu_idle(struct drm_device *dev, bool do_retire)
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002156{
2157 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002158 int ret, i;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002159
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002160 /* Flush everything onto the inactive list. */
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002161 for (i = 0; i < I915_NUM_RINGS; i++) {
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08002162 ret = i915_ring_idle(&dev_priv->ring[i], do_retire);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002163 if (ret)
2164 return ret;
2165 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08002166
Daniel Vetter8a1a49f2010-02-11 22:29:04 +01002167 return 0;
Daniel Vetter4df2faf2010-02-19 11:52:00 +01002168}
2169
Chris Wilson9ce079e2012-04-17 15:31:30 +01002170static void sandybridge_write_fence_reg(struct drm_device *dev, int reg,
2171 struct drm_i915_gem_object *obj)
Eric Anholt4e901fd2009-10-26 16:44:17 -07002172{
Eric Anholt4e901fd2009-10-26 16:44:17 -07002173 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt4e901fd2009-10-26 16:44:17 -07002174 uint64_t val;
2175
Chris Wilson9ce079e2012-04-17 15:31:30 +01002176 if (obj) {
2177 u32 size = obj->gtt_space->size;
Eric Anholt4e901fd2009-10-26 16:44:17 -07002178
Chris Wilson9ce079e2012-04-17 15:31:30 +01002179 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2180 0xfffff000) << 32;
2181 val |= obj->gtt_offset & 0xfffff000;
2182 val |= (uint64_t)((obj->stride / 128) - 1) <<
2183 SANDYBRIDGE_FENCE_PITCH_SHIFT;
Eric Anholt4e901fd2009-10-26 16:44:17 -07002184
Chris Wilson9ce079e2012-04-17 15:31:30 +01002185 if (obj->tiling_mode == I915_TILING_Y)
2186 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2187 val |= I965_FENCE_REG_VALID;
2188 } else
2189 val = 0;
Daniel Vetterc6642782010-11-12 13:46:18 +00002190
Chris Wilson9ce079e2012-04-17 15:31:30 +01002191 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
2192 POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
Eric Anholt4e901fd2009-10-26 16:44:17 -07002193}
2194
Chris Wilson9ce079e2012-04-17 15:31:30 +01002195static void i965_write_fence_reg(struct drm_device *dev, int reg,
2196 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002197{
Jesse Barnesde151cf2008-11-12 10:03:55 -08002198 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002199 uint64_t val;
2200
Chris Wilson9ce079e2012-04-17 15:31:30 +01002201 if (obj) {
2202 u32 size = obj->gtt_space->size;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002203
Chris Wilson9ce079e2012-04-17 15:31:30 +01002204 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2205 0xfffff000) << 32;
2206 val |= obj->gtt_offset & 0xfffff000;
2207 val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2208 if (obj->tiling_mode == I915_TILING_Y)
2209 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2210 val |= I965_FENCE_REG_VALID;
2211 } else
2212 val = 0;
Daniel Vetterc6642782010-11-12 13:46:18 +00002213
Chris Wilson9ce079e2012-04-17 15:31:30 +01002214 I915_WRITE64(FENCE_REG_965_0 + reg * 8, val);
2215 POSTING_READ(FENCE_REG_965_0 + reg * 8);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002216}
2217
Chris Wilson9ce079e2012-04-17 15:31:30 +01002218static void i915_write_fence_reg(struct drm_device *dev, int reg,
2219 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002220{
Jesse Barnesde151cf2008-11-12 10:03:55 -08002221 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson9ce079e2012-04-17 15:31:30 +01002222 u32 val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002223
Chris Wilson9ce079e2012-04-17 15:31:30 +01002224 if (obj) {
2225 u32 size = obj->gtt_space->size;
2226 int pitch_val;
2227 int tile_width;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002228
Chris Wilson9ce079e2012-04-17 15:31:30 +01002229 WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2230 (size & -size) != size ||
2231 (obj->gtt_offset & (size - 1)),
2232 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2233 obj->gtt_offset, obj->map_and_fenceable, size);
2234
2235 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2236 tile_width = 128;
2237 else
2238 tile_width = 512;
2239
2240 /* Note: pitch better be a power of two tile widths */
2241 pitch_val = obj->stride / tile_width;
2242 pitch_val = ffs(pitch_val) - 1;
2243
2244 val = obj->gtt_offset;
2245 if (obj->tiling_mode == I915_TILING_Y)
2246 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2247 val |= I915_FENCE_SIZE_BITS(size);
2248 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2249 val |= I830_FENCE_REG_VALID;
2250 } else
2251 val = 0;
2252
2253 if (reg < 8)
2254 reg = FENCE_REG_830_0 + reg * 4;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002255 else
Chris Wilson9ce079e2012-04-17 15:31:30 +01002256 reg = FENCE_REG_945_8 + (reg - 8) * 4;
Jesse Barnes0f973f22009-01-26 17:10:45 -08002257
Chris Wilson9ce079e2012-04-17 15:31:30 +01002258 I915_WRITE(reg, val);
2259 POSTING_READ(reg);
Jesse Barnesde151cf2008-11-12 10:03:55 -08002260}
2261
Chris Wilson9ce079e2012-04-17 15:31:30 +01002262static void i830_write_fence_reg(struct drm_device *dev, int reg,
2263 struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002264{
Jesse Barnesde151cf2008-11-12 10:03:55 -08002265 drm_i915_private_t *dev_priv = dev->dev_private;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002266 uint32_t val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002267
Chris Wilson9ce079e2012-04-17 15:31:30 +01002268 if (obj) {
2269 u32 size = obj->gtt_space->size;
2270 uint32_t pitch_val;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002271
Chris Wilson9ce079e2012-04-17 15:31:30 +01002272 WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2273 (size & -size) != size ||
2274 (obj->gtt_offset & (size - 1)),
2275 "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2276 obj->gtt_offset, size);
Eric Anholte76a16d2009-05-26 17:44:56 -07002277
Chris Wilson9ce079e2012-04-17 15:31:30 +01002278 pitch_val = obj->stride / 128;
2279 pitch_val = ffs(pitch_val) - 1;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002280
Chris Wilson9ce079e2012-04-17 15:31:30 +01002281 val = obj->gtt_offset;
2282 if (obj->tiling_mode == I915_TILING_Y)
2283 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2284 val |= I830_FENCE_SIZE_BITS(size);
2285 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2286 val |= I830_FENCE_REG_VALID;
2287 } else
2288 val = 0;
Daniel Vetterc6642782010-11-12 13:46:18 +00002289
Chris Wilson9ce079e2012-04-17 15:31:30 +01002290 I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2291 POSTING_READ(FENCE_REG_830_0 + reg * 4);
2292}
2293
2294static void i915_gem_write_fence(struct drm_device *dev, int reg,
2295 struct drm_i915_gem_object *obj)
2296{
2297 switch (INTEL_INFO(dev)->gen) {
2298 case 7:
2299 case 6: sandybridge_write_fence_reg(dev, reg, obj); break;
2300 case 5:
2301 case 4: i965_write_fence_reg(dev, reg, obj); break;
2302 case 3: i915_write_fence_reg(dev, reg, obj); break;
2303 case 2: i830_write_fence_reg(dev, reg, obj); break;
2304 default: break;
2305 }
Jesse Barnesde151cf2008-11-12 10:03:55 -08002306}
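/* The register layout differs per generation, but every helper above
 * follows the same pattern: a NULL object writes a zero value, which is
 * how a fence register is disabled.
 */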
2307
Chris Wilson61050802012-04-17 15:31:31 +01002308static inline int fence_number(struct drm_i915_private *dev_priv,
2309 struct drm_i915_fence_reg *fence)
2310{
2311 return fence - dev_priv->fence_regs;
2312}
2313
2314static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2315 struct drm_i915_fence_reg *fence,
2316 bool enable)
2317{
2318 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2319 int reg = fence_number(dev_priv, fence);
2320
2321 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
2322
2323 if (enable) {
2324 obj->fence_reg = reg;
2325 fence->obj = obj;
2326 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2327 } else {
2328 obj->fence_reg = I915_FENCE_REG_NONE;
2329 fence->obj = NULL;
2330 list_del_init(&fence->lru_list);
2331 }
2332}
2333
Chris Wilsond9e86c02010-11-10 16:40:20 +00002334static int
Chris Wilsona360bb12012-04-17 15:31:25 +01002335i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002336{
2337 int ret;
2338
2339 if (obj->fenced_gpu_access) {
Chris Wilson88241782011-01-07 17:09:48 +00002340 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
Chris Wilson1c293ea2012-04-17 15:31:27 +01002341 ret = i915_gem_flush_ring(obj->ring,
Chris Wilson88241782011-01-07 17:09:48 +00002342 0, obj->base.write_domain);
2343 if (ret)
2344 return ret;
2345 }
Chris Wilsond9e86c02010-11-10 16:40:20 +00002346
2347 obj->fenced_gpu_access = false;
2348 }
2349
Chris Wilson1c293ea2012-04-17 15:31:27 +01002350 if (obj->last_fenced_seqno) {
Chris Wilson18991842012-04-17 15:31:29 +01002351 ret = i915_wait_request(obj->ring,
2352 obj->last_fenced_seqno,
Chris Wilson14415742012-04-17 15:31:33 +01002353 false);
Chris Wilson18991842012-04-17 15:31:29 +01002354 if (ret)
2355 return ret;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002356
2357 obj->last_fenced_seqno = 0;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002358 }
2359
Chris Wilson63256ec2011-01-04 18:42:07 +00002360 /* Ensure that all CPU reads are completed before installing a fence
2361 * and all writes before removing the fence.
2362 */
2363 if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
2364 mb();
2365
Chris Wilsond9e86c02010-11-10 16:40:20 +00002366 return 0;
2367}
2368
2369int
2370i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2371{
Chris Wilson61050802012-04-17 15:31:31 +01002372 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002373 int ret;
2374
Chris Wilsona360bb12012-04-17 15:31:25 +01002375 ret = i915_gem_object_flush_fence(obj);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002376 if (ret)
2377 return ret;
2378
Chris Wilson61050802012-04-17 15:31:31 +01002379 if (obj->fence_reg == I915_FENCE_REG_NONE)
2380 return 0;
Chris Wilson1690e1e2011-12-14 13:57:08 +01002381
Chris Wilson61050802012-04-17 15:31:31 +01002382 i915_gem_object_update_fence(obj,
2383 &dev_priv->fence_regs[obj->fence_reg],
2384 false);
2385 i915_gem_object_fence_lost(obj);
Chris Wilsond9e86c02010-11-10 16:40:20 +00002386
2387 return 0;
2388}
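
/*
 * Hypothetical caller sketch: drop the fence before doing something that
 * is incompatible with fenced access (the cache-level change further down
 * does exactly this on pre-SandyBridge parts):
 *
 *	ret = i915_gem_object_put_fence(obj);
 *	if (ret)
 *		return ret;
 */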
2389
2390static struct drm_i915_fence_reg *
Chris Wilsona360bb12012-04-17 15:31:25 +01002391i915_find_fence_reg(struct drm_device *dev)
Daniel Vetterae3db242010-02-19 11:51:58 +01002392{
Daniel Vetterae3db242010-02-19 11:51:58 +01002393 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson8fe301a2012-04-17 15:31:28 +01002394 struct drm_i915_fence_reg *reg, *avail;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002395 int i;
Daniel Vetterae3db242010-02-19 11:51:58 +01002396
2397 /* First try to find a free reg */
Chris Wilsond9e86c02010-11-10 16:40:20 +00002398 avail = NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01002399 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2400 reg = &dev_priv->fence_regs[i];
2401 if (!reg->obj)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002402 return reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01002403
Chris Wilson1690e1e2011-12-14 13:57:08 +01002404 if (!reg->pin_count)
Chris Wilsond9e86c02010-11-10 16:40:20 +00002405 avail = reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01002406 }
2407
Chris Wilsond9e86c02010-11-10 16:40:20 +00002408 if (avail == NULL)
2409 return NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01002410
 2411	/* None free; try to steal the least-recently-used unpinned fence */
Chris Wilsond9e86c02010-11-10 16:40:20 +00002412 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
Chris Wilson1690e1e2011-12-14 13:57:08 +01002413 if (reg->pin_count)
Daniel Vetterae3db242010-02-19 11:51:58 +01002414 continue;
2415
Chris Wilson8fe301a2012-04-17 15:31:28 +01002416 return reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01002417 }
2418
Chris Wilson8fe301a2012-04-17 15:31:28 +01002419 return NULL;
Daniel Vetterae3db242010-02-19 11:51:58 +01002420}
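
/*
 * Note (summary, no new behaviour): a NULL return means every fence
 * register is currently pinned; i915_gem_object_get_fence() below turns
 * that into -EDEADLK so the caller can back off and retry instead of
 * blocking forever.
 */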
2421
Jesse Barnesde151cf2008-11-12 10:03:55 -08002422/**
Chris Wilson9a5a53b2012-03-22 15:10:00 +00002423 * i915_gem_object_get_fence - set up fencing for an object
Jesse Barnesde151cf2008-11-12 10:03:55 -08002424 * @obj: object to map through a fence reg
2425 *
2426 * When mapping objects through the GTT, userspace wants to be able to write
2427 * to them without having to worry about swizzling if the object is tiled.
Jesse Barnesde151cf2008-11-12 10:03:55 -08002428 * This function walks the fence regs looking for a free one for @obj,
2429 * stealing one if it can't find any.
2430 *
2431 * It then sets up the reg based on the object's properties: address, pitch
2432 * and tiling format.
Chris Wilson9a5a53b2012-03-22 15:10:00 +00002433 *
2434 * For an untiled surface, this removes any existing fence.
Jesse Barnesde151cf2008-11-12 10:03:55 -08002435 */
Chris Wilson8c4b8c32009-06-17 22:08:52 +01002436int
Chris Wilson06d98132012-04-17 15:31:24 +01002437i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
Jesse Barnesde151cf2008-11-12 10:03:55 -08002438{
Chris Wilson05394f32010-11-08 19:18:58 +00002439 struct drm_device *dev = obj->base.dev;
Jesse Barnes79e53942008-11-07 14:24:08 -08002440 struct drm_i915_private *dev_priv = dev->dev_private;
Chris Wilson14415742012-04-17 15:31:33 +01002441 bool enable = obj->tiling_mode != I915_TILING_NONE;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002442 struct drm_i915_fence_reg *reg;
Daniel Vetterae3db242010-02-19 11:51:58 +01002443 int ret;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002444
Chris Wilson14415742012-04-17 15:31:33 +01002445	/* Have the tiling parameters of the object been updated, so that we
 2446	 * need to serialise the write to the associated fence register?
2447 */
Chris Wilson5d82e3e2012-04-21 16:23:23 +01002448 if (obj->fence_dirty) {
Chris Wilson14415742012-04-17 15:31:33 +01002449 ret = i915_gem_object_flush_fence(obj);
2450 if (ret)
2451 return ret;
2452 }
Chris Wilson9a5a53b2012-03-22 15:10:00 +00002453
Chris Wilsond9e86c02010-11-10 16:40:20 +00002454 /* Just update our place in the LRU if our fence is getting reused. */
Chris Wilson05394f32010-11-08 19:18:58 +00002455 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2456 reg = &dev_priv->fence_regs[obj->fence_reg];
Chris Wilson5d82e3e2012-04-21 16:23:23 +01002457 if (!obj->fence_dirty) {
Chris Wilson14415742012-04-17 15:31:33 +01002458 list_move_tail(&reg->lru_list,
2459 &dev_priv->mm.fence_list);
2460 return 0;
2461 }
2462 } else if (enable) {
2463 reg = i915_find_fence_reg(dev);
2464 if (reg == NULL)
2465 return -EDEADLK;
Chris Wilsond9e86c02010-11-10 16:40:20 +00002466
Chris Wilson14415742012-04-17 15:31:33 +01002467 if (reg->obj) {
2468 struct drm_i915_gem_object *old = reg->obj;
2469
2470 ret = i915_gem_object_flush_fence(old);
Chris Wilson29c5a582011-03-17 15:23:22 +00002471 if (ret)
2472 return ret;
2473
Chris Wilson14415742012-04-17 15:31:33 +01002474 i915_gem_object_fence_lost(old);
Chris Wilson29c5a582011-03-17 15:23:22 +00002475 }
Chris Wilson14415742012-04-17 15:31:33 +01002476 } else
Eric Anholta09ba7f2009-08-29 12:49:51 -07002477 return 0;
Eric Anholta09ba7f2009-08-29 12:49:51 -07002478
Chris Wilson14415742012-04-17 15:31:33 +01002479 i915_gem_object_update_fence(obj, reg, enable);
Chris Wilson5d82e3e2012-04-21 16:23:23 +01002480 obj->fence_dirty = false;
Chris Wilson14415742012-04-17 15:31:33 +01002481
Chris Wilson9ce079e2012-04-17 15:31:30 +01002482 return 0;
Jesse Barnesde151cf2008-11-12 10:03:55 -08002483}
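
/*
 * Illustrative usage, assuming the object is meant to stay resident
 * (this mirrors the tiled execbuffer/scanout pattern but is not copied
 * from either path verbatim):
 *
 *	ret = i915_gem_object_pin(obj, alignment, true);
 *	if (ret)
 *		return ret;
 *
 *	if (obj->tiling_mode != I915_TILING_NONE) {
 *		ret = i915_gem_object_get_fence(obj);
 *		if (ret) {
 *			i915_gem_object_unpin(obj);
 *			return ret;
 *		}
 *	}
 */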
2484
2485/**
Eric Anholt673a3942008-07-30 12:06:12 -07002486 * Finds free space in the GTT aperture and binds the object there.
2487 */
2488static int
Chris Wilson05394f32010-11-08 19:18:58 +00002489i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
Daniel Vetter920afa72010-09-16 17:54:23 +02002490 unsigned alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01002491 bool map_and_fenceable)
Eric Anholt673a3942008-07-30 12:06:12 -07002492{
Chris Wilson05394f32010-11-08 19:18:58 +00002493 struct drm_device *dev = obj->base.dev;
Eric Anholt673a3942008-07-30 12:06:12 -07002494 drm_i915_private_t *dev_priv = dev->dev_private;
Eric Anholt673a3942008-07-30 12:06:12 -07002495 struct drm_mm_node *free_space;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002496 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
Daniel Vetter5e783302010-11-14 22:32:36 +01002497 u32 size, fence_size, fence_alignment, unfenced_alignment;
Daniel Vetter75e9e912010-11-04 17:11:09 +01002498 bool mappable, fenceable;
Chris Wilson07f73f62009-09-14 16:50:30 +01002499 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07002500
Chris Wilson05394f32010-11-08 19:18:58 +00002501 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilson3ef94da2009-09-14 16:50:29 +01002502 DRM_ERROR("Attempting to bind a purgeable object\n");
2503 return -EINVAL;
2504 }
2505
Chris Wilsone28f8712011-07-18 13:11:49 -07002506 fence_size = i915_gem_get_gtt_size(dev,
2507 obj->base.size,
2508 obj->tiling_mode);
2509 fence_alignment = i915_gem_get_gtt_alignment(dev,
2510 obj->base.size,
2511 obj->tiling_mode);
2512 unfenced_alignment =
2513 i915_gem_get_unfenced_gtt_alignment(dev,
2514 obj->base.size,
2515 obj->tiling_mode);
Chris Wilsona00b10c2010-09-24 21:15:47 +01002516
Eric Anholt673a3942008-07-30 12:06:12 -07002517 if (alignment == 0)
Daniel Vetter5e783302010-11-14 22:32:36 +01002518 alignment = map_and_fenceable ? fence_alignment :
2519 unfenced_alignment;
Daniel Vetter75e9e912010-11-04 17:11:09 +01002520 if (map_and_fenceable && alignment & (fence_alignment - 1)) {
Eric Anholt673a3942008-07-30 12:06:12 -07002521 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2522 return -EINVAL;
2523 }
2524
Chris Wilson05394f32010-11-08 19:18:58 +00002525 size = map_and_fenceable ? fence_size : obj->base.size;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002526
Chris Wilson654fc602010-05-27 13:18:21 +01002527 /* If the object is bigger than the entire aperture, reject it early
2528 * before evicting everything in a vain attempt to find space.
2529 */
Chris Wilson05394f32010-11-08 19:18:58 +00002530 if (obj->base.size >
Daniel Vetter75e9e912010-11-04 17:11:09 +01002531 (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
Chris Wilson654fc602010-05-27 13:18:21 +01002532 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2533 return -E2BIG;
2534 }
2535
Eric Anholt673a3942008-07-30 12:06:12 -07002536 search_free:
Daniel Vetter75e9e912010-11-04 17:11:09 +01002537 if (map_and_fenceable)
Daniel Vetter920afa72010-09-16 17:54:23 +02002538 free_space =
2539 drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
Chris Wilsona00b10c2010-09-24 21:15:47 +01002540 size, alignment, 0,
Daniel Vetter920afa72010-09-16 17:54:23 +02002541 dev_priv->mm.gtt_mappable_end,
2542 0);
2543 else
2544 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
Chris Wilsona00b10c2010-09-24 21:15:47 +01002545 size, alignment, 0);
Daniel Vetter920afa72010-09-16 17:54:23 +02002546
2547 if (free_space != NULL) {
Daniel Vetter75e9e912010-11-04 17:11:09 +01002548 if (map_and_fenceable)
Chris Wilson05394f32010-11-08 19:18:58 +00002549 obj->gtt_space =
Daniel Vetter920afa72010-09-16 17:54:23 +02002550 drm_mm_get_block_range_generic(free_space,
Chris Wilsona00b10c2010-09-24 21:15:47 +01002551 size, alignment, 0,
Daniel Vetter920afa72010-09-16 17:54:23 +02002552 dev_priv->mm.gtt_mappable_end,
2553 0);
2554 else
Chris Wilson05394f32010-11-08 19:18:58 +00002555 obj->gtt_space =
Chris Wilsona00b10c2010-09-24 21:15:47 +01002556 drm_mm_get_block(free_space, size, alignment);
Daniel Vetter920afa72010-09-16 17:54:23 +02002557 }
Chris Wilson05394f32010-11-08 19:18:58 +00002558 if (obj->gtt_space == NULL) {
Eric Anholt673a3942008-07-30 12:06:12 -07002559		/* No free block was found: evict something and retry. If even
 2560		 * eviction cannot make room, the object simply does not fit.
2561 */
Daniel Vetter75e9e912010-11-04 17:11:09 +01002562 ret = i915_gem_evict_something(dev, size, alignment,
2563 map_and_fenceable);
Chris Wilson97311292009-09-21 00:22:34 +01002564 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07002565 return ret;
Chris Wilson97311292009-09-21 00:22:34 +01002566
Eric Anholt673a3942008-07-30 12:06:12 -07002567 goto search_free;
2568 }
2569
Chris Wilsone5281cc2010-10-28 13:45:36 +01002570 ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
Eric Anholt673a3942008-07-30 12:06:12 -07002571 if (ret) {
Chris Wilson05394f32010-11-08 19:18:58 +00002572 drm_mm_put_block(obj->gtt_space);
2573 obj->gtt_space = NULL;
Chris Wilson07f73f62009-09-14 16:50:30 +01002574
2575 if (ret == -ENOMEM) {
Chris Wilson809b6332011-01-10 17:33:15 +00002576 /* first try to reclaim some memory by clearing the GTT */
2577 ret = i915_gem_evict_everything(dev, false);
Chris Wilson07f73f62009-09-14 16:50:30 +01002578 if (ret) {
Chris Wilson07f73f62009-09-14 16:50:30 +01002579 /* now try to shrink everyone else */
Chris Wilson4bdadb92010-01-27 13:36:32 +00002580 if (gfpmask) {
2581 gfpmask = 0;
2582 goto search_free;
Chris Wilson07f73f62009-09-14 16:50:30 +01002583 }
2584
Chris Wilson809b6332011-01-10 17:33:15 +00002585 return -ENOMEM;
Chris Wilson07f73f62009-09-14 16:50:30 +01002586 }
2587
2588 goto search_free;
2589 }
2590
Eric Anholt673a3942008-07-30 12:06:12 -07002591 return ret;
2592 }
2593
Daniel Vetter74163902012-02-15 23:50:21 +01002594 ret = i915_gem_gtt_prepare_object(obj);
Daniel Vetter7c2e6fd2010-11-06 10:10:47 +01002595 if (ret) {
Chris Wilsone5281cc2010-10-28 13:45:36 +01002596 i915_gem_object_put_pages_gtt(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00002597 drm_mm_put_block(obj->gtt_space);
2598 obj->gtt_space = NULL;
Chris Wilson07f73f62009-09-14 16:50:30 +01002599
Chris Wilson809b6332011-01-10 17:33:15 +00002600 if (i915_gem_evict_everything(dev, false))
Chris Wilson07f73f62009-09-14 16:50:30 +01002601 return ret;
Chris Wilson07f73f62009-09-14 16:50:30 +01002602
2603 goto search_free;
Eric Anholt673a3942008-07-30 12:06:12 -07002604 }
Eric Anholt673a3942008-07-30 12:06:12 -07002605
Daniel Vetter0ebb9822012-02-15 23:50:24 +01002606 if (!dev_priv->mm.aliasing_ppgtt)
2607 i915_gem_gtt_bind_object(obj, obj->cache_level);
Eric Anholt673a3942008-07-30 12:06:12 -07002608
Chris Wilson6299f992010-11-24 12:23:44 +00002609 list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
Chris Wilson05394f32010-11-08 19:18:58 +00002610 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
Chris Wilsonbf1a1092010-08-07 11:01:20 +01002611
Eric Anholt673a3942008-07-30 12:06:12 -07002612 /* Assert that the object is not currently in any GPU domain. As it
2613 * wasn't in the GTT, there shouldn't be any way it could have been in
2614 * a GPU cache
2615 */
Chris Wilson05394f32010-11-08 19:18:58 +00002616 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2617 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
Eric Anholt673a3942008-07-30 12:06:12 -07002618
Chris Wilson6299f992010-11-24 12:23:44 +00002619 obj->gtt_offset = obj->gtt_space->start;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002620
Daniel Vetter75e9e912010-11-04 17:11:09 +01002621 fenceable =
Chris Wilson05394f32010-11-08 19:18:58 +00002622 obj->gtt_space->size == fence_size &&
Akshay Joshi0206e352011-08-16 15:34:10 -04002623 (obj->gtt_space->start & (fence_alignment - 1)) == 0;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002624
Daniel Vetter75e9e912010-11-04 17:11:09 +01002625 mappable =
Chris Wilson05394f32010-11-08 19:18:58 +00002626 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
Chris Wilsona00b10c2010-09-24 21:15:47 +01002627
Chris Wilson05394f32010-11-08 19:18:58 +00002628 obj->map_and_fenceable = mappable && fenceable;
Daniel Vetter75e9e912010-11-04 17:11:09 +01002629
Chris Wilsondb53a302011-02-03 11:57:46 +00002630 trace_i915_gem_object_bind(obj, map_and_fenceable);
Eric Anholt673a3942008-07-30 12:06:12 -07002631 return 0;
2632}
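
/*
 * Control-flow summary of the bind above (descriptive only):
 *
 *	find space  -> on miss, i915_gem_evict_something() and retry
 *	get pages   -> on ENOMEM, evict everything / relax gfpmask and retry
 *	map in GTT  -> on failure, evict everything and retry
 *
 * so a bind only fails once both eviction and the shrinker have had a
 * chance to make room.
 */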
2633
2634void
Chris Wilson05394f32010-11-08 19:18:58 +00002635i915_gem_clflush_object(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07002636{
Eric Anholt673a3942008-07-30 12:06:12 -07002637	/* If no page list has been set up, the object is not bound to the
 2638	 * GPU, and we can skip the cache flush because it will happen
 2639	 * again at bind time.
2640 */
Chris Wilson05394f32010-11-08 19:18:58 +00002641 if (obj->pages == NULL)
Eric Anholt673a3942008-07-30 12:06:12 -07002642 return;
2643
Chris Wilson9c23f7f2011-03-29 16:59:52 -07002644 /* If the GPU is snooping the contents of the CPU cache,
2645 * we do not need to manually clear the CPU cache lines. However,
2646 * the caches are only snooped when the render cache is
2647 * flushed/invalidated. As we always have to emit invalidations
2648 * and flushes when moving into and out of the RENDER domain, correct
2649 * snooping behaviour occurs naturally as the result of our domain
2650 * tracking.
2651 */
2652 if (obj->cache_level != I915_CACHE_NONE)
2653 return;
2654
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002655 trace_i915_gem_object_clflush(obj);
Eric Anholtcfa16a02009-05-26 18:46:16 -07002656
Chris Wilson05394f32010-11-08 19:18:58 +00002657 drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
Eric Anholt673a3942008-07-30 12:06:12 -07002658}
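
/*
 * Example of when this is a no-op (for illustration): an object created
 * by i915_gem_alloc_object() on a HAS_LLC(dev) part starts out as
 * I915_CACHE_LLC, so the cache_level check above returns early and the
 * GPU's snooping, rather than clflush, provides coherency.
 */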
2659
Eric Anholte47c68e2008-11-14 13:35:19 -08002660/** Flushes any GPU write domain for the object if it's dirty. */
Chris Wilson88241782011-01-07 17:09:48 +00002661static int
Chris Wilson3619df02010-11-28 15:37:17 +00002662i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08002663{
Chris Wilson05394f32010-11-08 19:18:58 +00002664 if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
Chris Wilson88241782011-01-07 17:09:48 +00002665 return 0;
Eric Anholte47c68e2008-11-14 13:35:19 -08002666
2667 /* Queue the GPU write cache flushing we need. */
Chris Wilsondb53a302011-02-03 11:57:46 +00002668 return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002669}
2670
2671/** Flushes the GTT write domain for the object if it's dirty. */
2672static void
Chris Wilson05394f32010-11-08 19:18:58 +00002673i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08002674{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002675 uint32_t old_write_domain;
2676
Chris Wilson05394f32010-11-08 19:18:58 +00002677 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
Eric Anholte47c68e2008-11-14 13:35:19 -08002678 return;
2679
Chris Wilson63256ec2011-01-04 18:42:07 +00002680 /* No actual flushing is required for the GTT write domain. Writes
Eric Anholte47c68e2008-11-14 13:35:19 -08002681 * to it immediately go to main memory as far as we know, so there's
2682 * no chipset flush. It also doesn't land in render cache.
Chris Wilson63256ec2011-01-04 18:42:07 +00002683 *
2684 * However, we do have to enforce the order so that all writes through
2685 * the GTT land before any writes to the device, such as updates to
2686 * the GATT itself.
Eric Anholte47c68e2008-11-14 13:35:19 -08002687 */
Chris Wilson63256ec2011-01-04 18:42:07 +00002688 wmb();
2689
Chris Wilson05394f32010-11-08 19:18:58 +00002690 old_write_domain = obj->base.write_domain;
2691 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002692
2693 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00002694 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002695 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002696}
2697
2698/** Flushes the CPU write domain for the object if it's dirty. */
2699static void
Chris Wilson05394f32010-11-08 19:18:58 +00002700i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
Eric Anholte47c68e2008-11-14 13:35:19 -08002701{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002702 uint32_t old_write_domain;
Eric Anholte47c68e2008-11-14 13:35:19 -08002703
Chris Wilson05394f32010-11-08 19:18:58 +00002704 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
Eric Anholte47c68e2008-11-14 13:35:19 -08002705 return;
2706
2707 i915_gem_clflush_object(obj);
Daniel Vetter40ce6572010-11-05 18:12:18 +01002708 intel_gtt_chipset_flush();
Chris Wilson05394f32010-11-08 19:18:58 +00002709 old_write_domain = obj->base.write_domain;
2710 obj->base.write_domain = 0;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002711
2712 trace_i915_gem_object_change_domain(obj,
Chris Wilson05394f32010-11-08 19:18:58 +00002713 obj->base.read_domains,
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002714 old_write_domain);
Eric Anholte47c68e2008-11-14 13:35:19 -08002715}
2716
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002717/**
2718 * Moves a single object to the GTT read, and possibly write domain.
2719 *
2720 * This function returns when the move is complete, including waiting on
2721 * flushes to occur.
2722 */
Jesse Barnes79e53942008-11-07 14:24:08 -08002723int
Chris Wilson20217462010-11-23 15:26:33 +00002724i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002725{
Chris Wilson8325a092012-04-24 15:52:35 +01002726 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002727 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08002728 int ret;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002729
Eric Anholt02354392008-11-26 13:58:13 -08002730 /* Not valid to be called on unbound objects. */
Chris Wilson05394f32010-11-08 19:18:58 +00002731 if (obj->gtt_space == NULL)
Eric Anholt02354392008-11-26 13:58:13 -08002732 return -EINVAL;
2733
Chris Wilson8d7e3de2011-02-07 15:23:02 +00002734 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
2735 return 0;
2736
Chris Wilson88241782011-01-07 17:09:48 +00002737 ret = i915_gem_object_flush_gpu_write_domain(obj);
2738 if (ret)
2739 return ret;
2740
Chris Wilson87ca9c82010-12-02 09:42:56 +00002741 if (obj->pending_gpu_write || write) {
Chris Wilsonce453d82011-02-21 14:43:56 +00002742 ret = i915_gem_object_wait_rendering(obj);
Chris Wilson87ca9c82010-12-02 09:42:56 +00002743 if (ret)
2744 return ret;
2745 }
Chris Wilson2dafb1e2010-06-07 14:03:05 +01002746
Chris Wilson72133422010-09-13 23:56:38 +01002747 i915_gem_object_flush_cpu_write_domain(obj);
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002748
Chris Wilson05394f32010-11-08 19:18:58 +00002749 old_write_domain = obj->base.write_domain;
2750 old_read_domains = obj->base.read_domains;
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002751
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002752 /* It should now be out of any other write domains, and we can update
2753 * the domain values for our changes.
2754 */
Chris Wilson05394f32010-11-08 19:18:58 +00002755 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2756 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Eric Anholte47c68e2008-11-14 13:35:19 -08002757 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00002758 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
2759 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
2760 obj->dirty = 1;
Eric Anholte47c68e2008-11-14 13:35:19 -08002761 }
2762
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002763 trace_i915_gem_object_change_domain(obj,
2764 old_read_domains,
2765 old_write_domain);
2766
Chris Wilson8325a092012-04-24 15:52:35 +01002767 /* And bump the LRU for this access */
2768 if (i915_gem_object_is_inactive(obj))
2769 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2770
Eric Anholte47c68e2008-11-14 13:35:19 -08002771 return 0;
2772}
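
/*
 * Hedged sketch of a typical caller (in the spirit of a GTT pwrite path,
 * not lifted from one): make the object GTT-coherent for writing before
 * touching it through the aperture,
 *
 *	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 *	if (ret)
 *		return ret;
 *	offset = obj->gtt_offset;
 *	... write through the mappable aperture at offset ...
 *
 * The object must already be bound, otherwise -EINVAL is returned.
 */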
2773
Chris Wilsone4ffd172011-04-04 09:44:39 +01002774int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
2775 enum i915_cache_level cache_level)
2776{
Daniel Vetter7bddb012012-02-09 17:15:47 +01002777 struct drm_device *dev = obj->base.dev;
2778 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsone4ffd172011-04-04 09:44:39 +01002779 int ret;
2780
2781 if (obj->cache_level == cache_level)
2782 return 0;
2783
2784 if (obj->pin_count) {
2785 DRM_DEBUG("can not change the cache level of pinned objects\n");
2786 return -EBUSY;
2787 }
2788
2789 if (obj->gtt_space) {
2790 ret = i915_gem_object_finish_gpu(obj);
2791 if (ret)
2792 return ret;
2793
2794 i915_gem_object_finish_gtt(obj);
2795
2796 /* Before SandyBridge, you could not use tiling or fence
2797 * registers with snooped memory, so relinquish any fences
2798 * currently pointing to our region in the aperture.
2799 */
2800 if (INTEL_INFO(obj->base.dev)->gen < 6) {
2801 ret = i915_gem_object_put_fence(obj);
2802 if (ret)
2803 return ret;
2804 }
2805
Daniel Vetter74898d72012-02-15 23:50:22 +01002806 if (obj->has_global_gtt_mapping)
2807 i915_gem_gtt_bind_object(obj, cache_level);
Daniel Vetter7bddb012012-02-09 17:15:47 +01002808 if (obj->has_aliasing_ppgtt_mapping)
2809 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
2810 obj, cache_level);
Chris Wilsone4ffd172011-04-04 09:44:39 +01002811 }
2812
2813 if (cache_level == I915_CACHE_NONE) {
2814 u32 old_read_domains, old_write_domain;
2815
2816 /* If we're coming from LLC cached, then we haven't
2817 * actually been tracking whether the data is in the
2818 * CPU cache or not, since we only allow one bit set
2819 * in obj->write_domain and have been skipping the clflushes.
2820 * Just set it to the CPU cache for now.
2821 */
2822 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
2823 WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
2824
2825 old_read_domains = obj->base.read_domains;
2826 old_write_domain = obj->base.write_domain;
2827
2828 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
2829 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2830
2831 trace_i915_gem_object_change_domain(obj,
2832 old_read_domains,
2833 old_write_domain);
2834 }
2835
2836 obj->cache_level = cache_level;
2837 return 0;
2838}
2839
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002840/*
Chris Wilson2da3b9b2011-04-14 09:41:17 +01002841 * Prepare buffer for display plane (scanout, cursors, etc).
2842 * Can be called from an uninterruptible phase (modesetting) and allows
2843 * any flushes to be pipelined (for pageflips).
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002844 */
2845int
Chris Wilson2da3b9b2011-04-14 09:41:17 +01002846i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
2847 u32 alignment,
Chris Wilson919926a2010-11-12 13:42:53 +00002848 struct intel_ring_buffer *pipelined)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002849{
Chris Wilson2da3b9b2011-04-14 09:41:17 +01002850 u32 old_read_domains, old_write_domain;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002851 int ret;
2852
Chris Wilson88241782011-01-07 17:09:48 +00002853 ret = i915_gem_object_flush_gpu_write_domain(obj);
2854 if (ret)
2855 return ret;
2856
Chris Wilson0be73282010-12-06 14:36:27 +00002857 if (pipelined != obj->ring) {
Ben Widawsky2911a352012-04-05 14:47:36 -07002858 ret = i915_gem_object_sync(obj, pipelined);
2859 if (ret)
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002860 return ret;
2861 }
2862
Eric Anholta7ef0642011-03-29 16:59:54 -07002863 /* The display engine is not coherent with the LLC cache on gen6. As
2864 * a result, we make sure that the pinning that is about to occur is
 2865	 * done with uncached PTEs. This is the lowest common denominator for all
2866 * chipsets.
2867 *
2868 * However for gen6+, we could do better by using the GFDT bit instead
2869 * of uncaching, which would allow us to flush all the LLC-cached data
2870 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
2871 */
2872 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
2873 if (ret)
2874 return ret;
2875
Chris Wilson2da3b9b2011-04-14 09:41:17 +01002876 /* As the user may map the buffer once pinned in the display plane
2877 * (e.g. libkms for the bootup splash), we have to ensure that we
2878 * always use map_and_fenceable for all scanout buffers.
2879 */
2880 ret = i915_gem_object_pin(obj, alignment, true);
2881 if (ret)
2882 return ret;
2883
Chris Wilsonb118c1e2010-05-27 13:18:14 +01002884 i915_gem_object_flush_cpu_write_domain(obj);
2885
Chris Wilson2da3b9b2011-04-14 09:41:17 +01002886 old_write_domain = obj->base.write_domain;
Chris Wilson05394f32010-11-08 19:18:58 +00002887 old_read_domains = obj->base.read_domains;
Chris Wilson2da3b9b2011-04-14 09:41:17 +01002888
2889 /* It should now be out of any other write domains, and we can update
2890 * the domain values for our changes.
2891 */
2892 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
Chris Wilson05394f32010-11-08 19:18:58 +00002893 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002894
2895 trace_i915_gem_object_change_domain(obj,
2896 old_read_domains,
Chris Wilson2da3b9b2011-04-14 09:41:17 +01002897 old_write_domain);
Zhenyu Wangb9241ea2009-11-25 13:09:39 +08002898
2899 return 0;
2900}
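
/*
 * Minimal modesetting-style sketch (assumed usage, not copied from
 * intel_display.c): pin a framebuffer object for scanout while the given
 * ring may still be rendering into it,
 *
 *	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
 *	if (ret)
 *		return ret;
 *
 * afterwards the object is uncached, pinned map_and_fenceable and in the
 * GTT read domain, ready to be handed to the display engine.
 */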
2901
Chris Wilson85345512010-11-13 09:49:11 +00002902int
Chris Wilsona8198ee2011-04-13 22:04:09 +01002903i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
Chris Wilson85345512010-11-13 09:49:11 +00002904{
Chris Wilson88241782011-01-07 17:09:48 +00002905 int ret;
2906
Chris Wilsona8198ee2011-04-13 22:04:09 +01002907 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
Chris Wilson85345512010-11-13 09:49:11 +00002908 return 0;
2909
Chris Wilson88241782011-01-07 17:09:48 +00002910 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
Chris Wilsondb53a302011-02-03 11:57:46 +00002911 ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
Chris Wilson88241782011-01-07 17:09:48 +00002912 if (ret)
2913 return ret;
2914 }
Chris Wilson85345512010-11-13 09:49:11 +00002915
Chris Wilsonc501ae72011-12-14 13:57:23 +01002916 ret = i915_gem_object_wait_rendering(obj);
2917 if (ret)
2918 return ret;
2919
Chris Wilsona8198ee2011-04-13 22:04:09 +01002920 /* Ensure that we invalidate the GPU's caches and TLBs. */
2921 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
Chris Wilsonc501ae72011-12-14 13:57:23 +01002922 return 0;
Chris Wilson85345512010-11-13 09:49:11 +00002923}
2924
Eric Anholte47c68e2008-11-14 13:35:19 -08002925/**
2926 * Moves a single object to the CPU read, and possibly write domain.
2927 *
2928 * This function returns when the move is complete, including waiting on
2929 * flushes to occur.
2930 */
Chris Wilsondabdfe02012-03-26 10:10:27 +02002931int
Chris Wilson919926a2010-11-12 13:42:53 +00002932i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
Eric Anholte47c68e2008-11-14 13:35:19 -08002933{
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002934 uint32_t old_write_domain, old_read_domains;
Eric Anholte47c68e2008-11-14 13:35:19 -08002935 int ret;
2936
Chris Wilson8d7e3de2011-02-07 15:23:02 +00002937 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
2938 return 0;
2939
Chris Wilson88241782011-01-07 17:09:48 +00002940 ret = i915_gem_object_flush_gpu_write_domain(obj);
2941 if (ret)
2942 return ret;
2943
Chris Wilsonf8413192012-04-10 11:52:50 +01002944 if (write || obj->pending_gpu_write) {
2945 ret = i915_gem_object_wait_rendering(obj);
2946 if (ret)
2947 return ret;
2948 }
Eric Anholte47c68e2008-11-14 13:35:19 -08002949
2950 i915_gem_object_flush_gtt_write_domain(obj);
2951
Chris Wilson05394f32010-11-08 19:18:58 +00002952 old_write_domain = obj->base.write_domain;
2953 old_read_domains = obj->base.read_domains;
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002954
Eric Anholte47c68e2008-11-14 13:35:19 -08002955 /* Flush the CPU cache if it's still invalid. */
Chris Wilson05394f32010-11-08 19:18:58 +00002956 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
Eric Anholte47c68e2008-11-14 13:35:19 -08002957 i915_gem_clflush_object(obj);
Eric Anholte47c68e2008-11-14 13:35:19 -08002958
Chris Wilson05394f32010-11-08 19:18:58 +00002959 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08002960 }
2961
2962 /* It should now be out of any other write domains, and we can update
2963 * the domain values for our changes.
2964 */
Chris Wilson05394f32010-11-08 19:18:58 +00002965 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
Eric Anholte47c68e2008-11-14 13:35:19 -08002966
2967 /* If we're writing through the CPU, then the GPU read domains will
2968 * need to be invalidated at next use.
2969 */
2970 if (write) {
Chris Wilson05394f32010-11-08 19:18:58 +00002971 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
2972 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
Eric Anholte47c68e2008-11-14 13:35:19 -08002973 }
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002974
Chris Wilson1c5d22f2009-08-25 11:15:50 +01002975 trace_i915_gem_object_change_domain(obj,
2976 old_read_domains,
2977 old_write_domain);
2978
Eric Anholt2ef7eea2008-11-10 10:53:25 -08002979 return 0;
2980}
2981
Eric Anholt673a3942008-07-30 12:06:12 -07002982/* Throttle our rendering by waiting until the ring has completed our requests
2983 * emitted over 20 msec ago.
2984 *
Eric Anholtb9624422009-06-03 07:27:35 +00002985 * Note that if we were to use the current jiffies each time around the loop,
2986 * we wouldn't escape the function with any frames outstanding if the time to
2987 * render a frame was over 20ms.
2988 *
Eric Anholt673a3942008-07-30 12:06:12 -07002989 * This should get us reasonable parallelism between CPU and GPU but also
2990 * relatively low latency when blocking on a particular request to finish.
2991 */
2992static int
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002993i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07002994{
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002995 struct drm_i915_private *dev_priv = dev->dev_private;
2996 struct drm_i915_file_private *file_priv = file->driver_priv;
Eric Anholtb9624422009-06-03 07:27:35 +00002997 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01002998 struct drm_i915_gem_request *request;
2999 struct intel_ring_buffer *ring = NULL;
3000 u32 seqno = 0;
3001 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003002
Chris Wilsone110e8d2011-01-26 15:39:14 +00003003 if (atomic_read(&dev_priv->mm.wedged))
3004 return -EIO;
3005
Chris Wilson1c255952010-09-26 11:03:27 +01003006 spin_lock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003007 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
Eric Anholtb9624422009-06-03 07:27:35 +00003008 if (time_after_eq(request->emitted_jiffies, recent_enough))
3009 break;
3010
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003011 ring = request->ring;
3012 seqno = request->seqno;
Eric Anholtb9624422009-06-03 07:27:35 +00003013 }
Chris Wilson1c255952010-09-26 11:03:27 +01003014 spin_unlock(&file_priv->mm.lock);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003015
3016 if (seqno == 0)
3017 return 0;
3018
3019 ret = 0;
Chris Wilson78501ea2010-10-27 12:18:21 +01003020 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003021 /* And wait for the seqno passing without holding any locks and
3022 * causing extra latency for others. This is safe as the irq
3023 * generation is designed to be run atomically and so is
3024 * lockless.
3025 */
Chris Wilsonb13c2b92010-12-13 16:54:50 +00003026 if (ring->irq_get(ring)) {
3027 ret = wait_event_interruptible(ring->irq_queue,
3028 i915_seqno_passed(ring->get_seqno(ring), seqno)
3029 || atomic_read(&dev_priv->mm.wedged));
3030 ring->irq_put(ring);
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003031
Chris Wilsonb13c2b92010-12-13 16:54:50 +00003032 if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
3033 ret = -EIO;
Eric Anholte959b5d2011-12-22 14:55:01 -08003034 } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
3035 seqno) ||
Eric Anholt7ea29b12011-12-22 14:54:59 -08003036 atomic_read(&dev_priv->mm.wedged), 3000)) {
3037 ret = -EBUSY;
Chris Wilsonb13c2b92010-12-13 16:54:50 +00003038 }
Chris Wilsonf787a5f2010-09-24 16:02:42 +01003039 }
3040
3041 if (ret == 0)
3042 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
Eric Anholtb9624422009-06-03 07:27:35 +00003043
Eric Anholt673a3942008-07-30 12:06:12 -07003044 return ret;
3045}
3046
Eric Anholt673a3942008-07-30 12:06:12 -07003047int
Chris Wilson05394f32010-11-08 19:18:58 +00003048i915_gem_object_pin(struct drm_i915_gem_object *obj,
3049 uint32_t alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01003050 bool map_and_fenceable)
Eric Anholt673a3942008-07-30 12:06:12 -07003051{
Eric Anholt673a3942008-07-30 12:06:12 -07003052 int ret;
3053
Chris Wilson05394f32010-11-08 19:18:58 +00003054 BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
Chris Wilsonac0c6b52010-05-27 13:18:18 +01003055
Chris Wilson05394f32010-11-08 19:18:58 +00003056 if (obj->gtt_space != NULL) {
3057 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
3058 (map_and_fenceable && !obj->map_and_fenceable)) {
3059 WARN(obj->pin_count,
Chris Wilsonae7d49d2010-08-04 12:37:41 +01003060 "bo is already pinned with incorrect alignment:"
Daniel Vetter75e9e912010-11-04 17:11:09 +01003061 " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
3062 " obj->map_and_fenceable=%d\n",
Chris Wilson05394f32010-11-08 19:18:58 +00003063 obj->gtt_offset, alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01003064 map_and_fenceable,
Chris Wilson05394f32010-11-08 19:18:58 +00003065 obj->map_and_fenceable);
Chris Wilsonac0c6b52010-05-27 13:18:18 +01003066 ret = i915_gem_object_unbind(obj);
3067 if (ret)
3068 return ret;
3069 }
3070 }
3071
Chris Wilson05394f32010-11-08 19:18:58 +00003072 if (obj->gtt_space == NULL) {
Chris Wilsona00b10c2010-09-24 21:15:47 +01003073 ret = i915_gem_object_bind_to_gtt(obj, alignment,
Daniel Vetter75e9e912010-11-04 17:11:09 +01003074 map_and_fenceable);
Chris Wilson97311292009-09-21 00:22:34 +01003075 if (ret)
Eric Anholt673a3942008-07-30 12:06:12 -07003076 return ret;
Chris Wilson22c344e2009-02-11 14:26:45 +00003077 }
Jesse Barnes76446ca2009-12-17 22:05:42 -05003078
Daniel Vetter74898d72012-02-15 23:50:22 +01003079 if (!obj->has_global_gtt_mapping && map_and_fenceable)
3080 i915_gem_gtt_bind_object(obj, obj->cache_level);
3081
Chris Wilson1b502472012-04-24 15:47:30 +01003082 obj->pin_count++;
Chris Wilson6299f992010-11-24 12:23:44 +00003083 obj->pin_mappable |= map_and_fenceable;
Eric Anholt673a3942008-07-30 12:06:12 -07003084
3085 return 0;
3086}
3087
3088void
Chris Wilson05394f32010-11-08 19:18:58 +00003089i915_gem_object_unpin(struct drm_i915_gem_object *obj)
Eric Anholt673a3942008-07-30 12:06:12 -07003090{
Chris Wilson05394f32010-11-08 19:18:58 +00003091 BUG_ON(obj->pin_count == 0);
3092 BUG_ON(obj->gtt_space == NULL);
Eric Anholt673a3942008-07-30 12:06:12 -07003093
Chris Wilson1b502472012-04-24 15:47:30 +01003094 if (--obj->pin_count == 0)
Chris Wilson6299f992010-11-24 12:23:44 +00003095 obj->pin_mappable = false;
Eric Anholt673a3942008-07-30 12:06:12 -07003096}
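
/*
 * Pin/unpin are reference counted; an illustrative (hypothetical) balanced
 * pattern, using a 4096-byte alignment purely as an example value:
 *
 *	ret = i915_gem_object_pin(obj, 4096, true);
 *	if (ret)
 *		return ret;
 *	... use obj->gtt_offset ...
 *	i915_gem_object_unpin(obj);
 *
 * Unbalanced calls trip the BUG_ON(obj->pin_count == 0) above.
 */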
3097
3098int
3099i915_gem_pin_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003100 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003101{
3102 struct drm_i915_gem_pin *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003103 struct drm_i915_gem_object *obj;
Eric Anholt673a3942008-07-30 12:06:12 -07003104 int ret;
3105
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003106 ret = i915_mutex_lock_interruptible(dev);
3107 if (ret)
3108 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003109
Chris Wilson05394f32010-11-08 19:18:58 +00003110 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003111 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003112 ret = -ENOENT;
3113 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003114 }
Eric Anholt673a3942008-07-30 12:06:12 -07003115
Chris Wilson05394f32010-11-08 19:18:58 +00003116 if (obj->madv != I915_MADV_WILLNEED) {
Chris Wilsonbb6baf72009-09-22 14:24:13 +01003117 DRM_ERROR("Attempting to pin a purgeable buffer\n");
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003118 ret = -EINVAL;
3119 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003120 }
3121
Chris Wilson05394f32010-11-08 19:18:58 +00003122 if (obj->pin_filp != NULL && obj->pin_filp != file) {
Jesse Barnes79e53942008-11-07 14:24:08 -08003123 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3124 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003125 ret = -EINVAL;
3126 goto out;
Jesse Barnes79e53942008-11-07 14:24:08 -08003127 }
3128
Chris Wilson05394f32010-11-08 19:18:58 +00003129 obj->user_pin_count++;
3130 obj->pin_filp = file;
3131 if (obj->user_pin_count == 1) {
Daniel Vetter75e9e912010-11-04 17:11:09 +01003132 ret = i915_gem_object_pin(obj, args->alignment, true);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003133 if (ret)
3134 goto out;
Eric Anholt673a3942008-07-30 12:06:12 -07003135 }
3136
3137 /* XXX - flush the CPU caches for pinned objects
3138 * as the X server doesn't manage domains yet
3139 */
Eric Anholte47c68e2008-11-14 13:35:19 -08003140 i915_gem_object_flush_cpu_write_domain(obj);
Chris Wilson05394f32010-11-08 19:18:58 +00003141 args->offset = obj->gtt_offset;
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003142out:
Chris Wilson05394f32010-11-08 19:18:58 +00003143 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003144unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003145 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003146 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003147}
3148
3149int
3150i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003151 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003152{
3153 struct drm_i915_gem_pin *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003154 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01003155 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003156
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003157 ret = i915_mutex_lock_interruptible(dev);
3158 if (ret)
3159 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003160
Chris Wilson05394f32010-11-08 19:18:58 +00003161 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003162 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003163 ret = -ENOENT;
3164 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003165 }
Chris Wilson76c1dec2010-09-25 11:22:51 +01003166
Chris Wilson05394f32010-11-08 19:18:58 +00003167 if (obj->pin_filp != file) {
Jesse Barnes79e53942008-11-07 14:24:08 -08003168 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
3169 args->handle);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003170 ret = -EINVAL;
3171 goto out;
Jesse Barnes79e53942008-11-07 14:24:08 -08003172 }
Chris Wilson05394f32010-11-08 19:18:58 +00003173 obj->user_pin_count--;
3174 if (obj->user_pin_count == 0) {
3175 obj->pin_filp = NULL;
Jesse Barnes79e53942008-11-07 14:24:08 -08003176 i915_gem_object_unpin(obj);
3177 }
Eric Anholt673a3942008-07-30 12:06:12 -07003178
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003179out:
Chris Wilson05394f32010-11-08 19:18:58 +00003180 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003181unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003182 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003183 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003184}
3185
3186int
3187i915_gem_busy_ioctl(struct drm_device *dev, void *data,
Chris Wilson05394f32010-11-08 19:18:58 +00003188 struct drm_file *file)
Eric Anholt673a3942008-07-30 12:06:12 -07003189{
3190 struct drm_i915_gem_busy *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003191 struct drm_i915_gem_object *obj;
Chris Wilson30dbf0c2010-09-25 10:19:17 +01003192 int ret;
3193
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003194 ret = i915_mutex_lock_interruptible(dev);
3195 if (ret)
3196 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003197
Chris Wilson05394f32010-11-08 19:18:58 +00003198 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003199 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003200 ret = -ENOENT;
3201 goto unlock;
Eric Anholt673a3942008-07-30 12:06:12 -07003202 }
Zou Nan haid1b851f2010-05-21 09:08:57 +08003203
Chris Wilson0be555b2010-08-04 15:36:30 +01003204 /* Count all active objects as busy, even if they are currently not used
3205 * by the gpu. Users of this interface expect objects to eventually
3206 * become non-busy without any further actions, therefore emit any
3207 * necessary flushes here.
Eric Anholtc4de0a52008-12-14 19:05:04 -08003208 */
Chris Wilson05394f32010-11-08 19:18:58 +00003209 args->busy = obj->active;
Chris Wilson0be555b2010-08-04 15:36:30 +01003210 if (args->busy) {
3211 /* Unconditionally flush objects, even when the gpu still uses this
3212 * object. Userspace calling this function indicates that it wants to
 3213		 * use this buffer sooner rather than later, so issuing the required
3214 * flush earlier is beneficial.
3215 */
Chris Wilson1a1c6972010-12-07 23:00:20 +00003216 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
Chris Wilsondb53a302011-02-03 11:57:46 +00003217 ret = i915_gem_flush_ring(obj->ring,
Chris Wilson88241782011-01-07 17:09:48 +00003218 0, obj->base.write_domain);
Chris Wilson1a1c6972010-12-07 23:00:20 +00003219 } else if (obj->ring->outstanding_lazy_request ==
3220 obj->last_rendering_seqno) {
3221 struct drm_i915_gem_request *request;
3222
Chris Wilson7a194872010-12-07 10:38:40 +00003223 /* This ring is not being cleared by active usage,
3224 * so emit a request to do so.
3225 */
Chris Wilson1a1c6972010-12-07 23:00:20 +00003226 request = kzalloc(sizeof(*request), GFP_KERNEL);
Rakib Mullick457eafc2011-11-16 00:49:28 +06003227 if (request) {
Akshay Joshi0206e352011-08-16 15:34:10 -04003228 ret = i915_add_request(obj->ring, NULL, request);
Rakib Mullick457eafc2011-11-16 00:49:28 +06003229 if (ret)
3230 kfree(request);
3231 } else
Chris Wilson7a194872010-12-07 10:38:40 +00003232 ret = -ENOMEM;
3233 }
Chris Wilson0be555b2010-08-04 15:36:30 +01003234
3235 /* Update the active list for the hardware's current position.
3236 * Otherwise this only updates on a delayed timer or when irqs
3237 * are actually unmasked, and our working set ends up being
3238 * larger than required.
3239 */
Chris Wilsondb53a302011-02-03 11:57:46 +00003240 i915_gem_retire_requests_ring(obj->ring);
Chris Wilson0be555b2010-08-04 15:36:30 +01003241
Chris Wilson05394f32010-11-08 19:18:58 +00003242 args->busy = obj->active;
Chris Wilson0be555b2010-08-04 15:36:30 +01003243 }
Eric Anholt673a3942008-07-30 12:06:12 -07003244
Chris Wilson05394f32010-11-08 19:18:58 +00003245 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003246unlock:
Eric Anholt673a3942008-07-30 12:06:12 -07003247 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003248 return ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003249}
3250
3251int
3252i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3253 struct drm_file *file_priv)
3254{
Akshay Joshi0206e352011-08-16 15:34:10 -04003255 return i915_gem_ring_throttle(dev, file_priv);
Eric Anholt673a3942008-07-30 12:06:12 -07003256}
3257
Chris Wilson3ef94da2009-09-14 16:50:29 +01003258int
3259i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3260 struct drm_file *file_priv)
3261{
3262 struct drm_i915_gem_madvise *args = data;
Chris Wilson05394f32010-11-08 19:18:58 +00003263 struct drm_i915_gem_object *obj;
Chris Wilson76c1dec2010-09-25 11:22:51 +01003264 int ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003265
3266 switch (args->madv) {
3267 case I915_MADV_DONTNEED:
3268 case I915_MADV_WILLNEED:
3269 break;
3270 default:
3271 return -EINVAL;
3272 }
3273
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003274 ret = i915_mutex_lock_interruptible(dev);
3275 if (ret)
3276 return ret;
3277
Chris Wilson05394f32010-11-08 19:18:58 +00003278 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
Chris Wilsonc8725222011-02-19 11:31:06 +00003279 if (&obj->base == NULL) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003280 ret = -ENOENT;
3281 goto unlock;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003282 }
Chris Wilson3ef94da2009-09-14 16:50:29 +01003283
Chris Wilson05394f32010-11-08 19:18:58 +00003284 if (obj->pin_count) {
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003285 ret = -EINVAL;
3286 goto out;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003287 }
3288
Chris Wilson05394f32010-11-08 19:18:58 +00003289 if (obj->madv != __I915_MADV_PURGED)
3290 obj->madv = args->madv;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003291
Chris Wilson2d7ef392009-09-20 23:13:10 +01003292 /* if the object is no longer bound, discard its backing storage */
Chris Wilson05394f32010-11-08 19:18:58 +00003293 if (i915_gem_object_is_purgeable(obj) &&
3294 obj->gtt_space == NULL)
Chris Wilson2d7ef392009-09-20 23:13:10 +01003295 i915_gem_object_truncate(obj);
3296
Chris Wilson05394f32010-11-08 19:18:58 +00003297 args->retained = obj->madv != __I915_MADV_PURGED;
Chris Wilsonbb6baf72009-09-22 14:24:13 +01003298
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003299out:
Chris Wilson05394f32010-11-08 19:18:58 +00003300 drm_gem_object_unreference(&obj->base);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003301unlock:
Chris Wilson3ef94da2009-09-14 16:50:29 +01003302 mutex_unlock(&dev->struct_mutex);
Chris Wilson1d7cfea2010-10-17 09:45:41 +01003303 return ret;
Chris Wilson3ef94da2009-09-14 16:50:29 +01003304}
3305
Chris Wilson05394f32010-11-08 19:18:58 +00003306struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3307 size_t size)
Daniel Vetterac52bc52010-04-09 19:05:06 +00003308{
Chris Wilson73aa8082010-09-30 11:46:12 +01003309 struct drm_i915_private *dev_priv = dev->dev_private;
Daniel Vetterc397b902010-04-09 19:05:07 +00003310 struct drm_i915_gem_object *obj;
Hugh Dickins5949eac2011-06-27 16:18:18 -07003311 struct address_space *mapping;
Daniel Vetterc397b902010-04-09 19:05:07 +00003312
3313 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
3314 if (obj == NULL)
3315 return NULL;
3316
3317 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
3318 kfree(obj);
3319 return NULL;
3320 }
3321
Hugh Dickins5949eac2011-06-27 16:18:18 -07003322 mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
3323 mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
3324
Chris Wilson73aa8082010-09-30 11:46:12 +01003325 i915_gem_info_add_obj(dev_priv, size);
3326
Daniel Vetterc397b902010-04-09 19:05:07 +00003327 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3328 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3329
Eugeni Dodonov3d29b842012-01-17 14:43:53 -02003330 if (HAS_LLC(dev)) {
3331 /* On some devices, we can have the GPU use the LLC (the CPU
Eric Anholta1871112011-03-29 16:59:55 -07003332 * cache) for about a 10% performance improvement
3333 * compared to uncached. Graphics requests other than
3334 * display scanout are coherent with the CPU in
3335 * accessing this cache. This means in this mode we
3336 * don't need to clflush on the CPU side, and on the
3337 * GPU side we only need to flush internal caches to
3338 * get data visible to the CPU.
3339 *
3340 * However, we maintain the display planes as UC, and so
3341 * need to rebind when first used as such.
3342 */
3343 obj->cache_level = I915_CACHE_LLC;
3344 } else
3345 obj->cache_level = I915_CACHE_NONE;
3346
Daniel Vetter62b8b212010-04-09 19:05:08 +00003347 obj->base.driver_private = NULL;
Daniel Vetterc397b902010-04-09 19:05:07 +00003348 obj->fence_reg = I915_FENCE_REG_NONE;
Chris Wilson69dc4982010-10-19 10:36:51 +01003349 INIT_LIST_HEAD(&obj->mm_list);
Daniel Vetter93a37f22010-11-05 20:24:53 +01003350 INIT_LIST_HEAD(&obj->gtt_list);
Chris Wilson69dc4982010-10-19 10:36:51 +01003351 INIT_LIST_HEAD(&obj->ring_list);
Chris Wilson432e58e2010-11-25 19:32:06 +00003352 INIT_LIST_HEAD(&obj->exec_list);
Daniel Vetterc397b902010-04-09 19:05:07 +00003353 INIT_LIST_HEAD(&obj->gpu_write_list);
Daniel Vetterc397b902010-04-09 19:05:07 +00003354 obj->madv = I915_MADV_WILLNEED;
Daniel Vetter75e9e912010-11-04 17:11:09 +01003355 /* Avoid an unnecessary call to unbind on the first bind. */
3356 obj->map_and_fenceable = true;
Daniel Vetterc397b902010-04-09 19:05:07 +00003357
Chris Wilson05394f32010-11-08 19:18:58 +00003358 return obj;
Daniel Vetterac52bc52010-04-09 19:05:06 +00003359}
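
/*
 * Sketch of the usual creation path, under the assumption that the caller
 * then exposes the object to userspace via a GEM handle:
 *
 *	obj = i915_gem_alloc_object(dev, roundup(size, PAGE_SIZE));
 *	if (obj == NULL)
 *		return -ENOMEM;
 *
 * The new object starts CPU-coherent (CPU read and write domains),
 * unbound, with no fence register and marked I915_MADV_WILLNEED.
 */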
3360
Eric Anholt673a3942008-07-30 12:06:12 -07003361int i915_gem_init_object(struct drm_gem_object *obj)
3362{
Daniel Vetterc397b902010-04-09 19:05:07 +00003363 BUG();
Jesse Barnesde151cf2008-11-12 10:03:55 -08003364
Eric Anholt673a3942008-07-30 12:06:12 -07003365 return 0;
3366}
3367
Chris Wilson1488fc02012-04-24 15:47:31 +01003368void i915_gem_free_object(struct drm_gem_object *gem_obj)
Chris Wilsonbe726152010-07-23 23:18:50 +01003369{
Chris Wilson1488fc02012-04-24 15:47:31 +01003370 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
Chris Wilson05394f32010-11-08 19:18:58 +00003371 struct drm_device *dev = obj->base.dev;
Chris Wilsonbe726152010-07-23 23:18:50 +01003372 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilsonbe726152010-07-23 23:18:50 +01003373
Chris Wilson26e12f892011-03-20 11:20:19 +00003374 trace_i915_gem_object_destroy(obj);
3375
Chris Wilson1488fc02012-04-24 15:47:31 +01003376 if (obj->phys_obj)
3377 i915_gem_detach_phys_object(dev, obj);
3378
3379 obj->pin_count = 0;
3380 if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
3381 bool was_interruptible;
3382
3383 was_interruptible = dev_priv->mm.interruptible;
3384 dev_priv->mm.interruptible = false;
3385
3386 WARN_ON(i915_gem_object_unbind(obj));
3387
3388 dev_priv->mm.interruptible = was_interruptible;
3389 }
3390
Chris Wilson05394f32010-11-08 19:18:58 +00003391 if (obj->base.map_list.map)
Rob Clarkb464e9a2011-08-10 08:09:08 -05003392 drm_gem_free_mmap_offset(&obj->base);
Chris Wilsonbe726152010-07-23 23:18:50 +01003393
Chris Wilson05394f32010-11-08 19:18:58 +00003394 drm_gem_object_release(&obj->base);
3395 i915_gem_info_remove_obj(dev_priv, obj->base.size);
Chris Wilsonbe726152010-07-23 23:18:50 +01003396
Chris Wilson05394f32010-11-08 19:18:58 +00003397 kfree(obj->bit_17);
3398 kfree(obj);
Chris Wilsonbe726152010-07-23 23:18:50 +01003399}
3400
Jesse Barnes5669fca2009-02-17 15:13:31 -08003401int
Eric Anholt673a3942008-07-30 12:06:12 -07003402i915_gem_idle(struct drm_device *dev)
3403{
3404 drm_i915_private_t *dev_priv = dev->dev_private;
Chris Wilson29105cc2010-01-07 10:39:13 +00003405 int ret;
Eric Anholt673a3942008-07-30 12:06:12 -07003406
Keith Packard6dbe2772008-10-14 21:41:13 -07003407 mutex_lock(&dev->struct_mutex);
3408
Chris Wilson87acb0a2010-10-19 10:13:00 +01003409 if (dev_priv->mm.suspended) {
Keith Packard6dbe2772008-10-14 21:41:13 -07003410 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07003411 return 0;
Keith Packard6dbe2772008-10-14 21:41:13 -07003412 }
Eric Anholt673a3942008-07-30 12:06:12 -07003413
Ben Widawskyb93f9cf2012-01-25 15:39:34 -08003414 ret = i915_gpu_idle(dev, true);
Keith Packard6dbe2772008-10-14 21:41:13 -07003415 if (ret) {
3416 mutex_unlock(&dev->struct_mutex);
Eric Anholt673a3942008-07-30 12:06:12 -07003417 return ret;
Keith Packard6dbe2772008-10-14 21:41:13 -07003418 }
Eric Anholt673a3942008-07-30 12:06:12 -07003419
Chris Wilson29105cc2010-01-07 10:39:13 +00003420 /* Under UMS, be paranoid and evict. */
Chris Wilsona39d7ef2012-04-24 18:22:52 +01003421 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3422 i915_gem_evict_everything(dev, false);
Chris Wilson29105cc2010-01-07 10:39:13 +00003423
Chris Wilson312817a2010-11-22 11:50:11 +00003424 i915_gem_reset_fences(dev);
3425
Chris Wilson29105cc2010-01-07 10:39:13 +00003426 /* Hack! Don't let anybody do execbuf while we don't control the chip.
3427 * We need to replace this with a semaphore, or something.
3428 * And not confound mm.suspended!
3429 */
3430 dev_priv->mm.suspended = 1;
Daniel Vetterbc0c7f12010-08-20 18:18:48 +02003431 del_timer_sync(&dev_priv->hangcheck_timer);
Chris Wilson29105cc2010-01-07 10:39:13 +00003432
3433 i915_kernel_lost_context(dev);
Keith Packard6dbe2772008-10-14 21:41:13 -07003434 i915_gem_cleanup_ringbuffer(dev);
Chris Wilson29105cc2010-01-07 10:39:13 +00003435
Keith Packard6dbe2772008-10-14 21:41:13 -07003436 mutex_unlock(&dev->struct_mutex);
3437
Chris Wilson29105cc2010-01-07 10:39:13 +00003438 /* Cancel the retire work handler, which should be idle now. */
3439 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3440
Eric Anholt673a3942008-07-30 12:06:12 -07003441 return 0;
3442}
3443
Daniel Vetterf691e2f2012-02-02 09:58:12 +01003444void i915_gem_init_swizzling(struct drm_device *dev)
3445{
3446 drm_i915_private_t *dev_priv = dev->dev_private;
3447
Daniel Vetter11782b02012-01-31 16:47:55 +01003448 if (INTEL_INFO(dev)->gen < 5 ||
Daniel Vetterf691e2f2012-02-02 09:58:12 +01003449 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
3450 return;
3451
3452 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
3453 DISP_TILE_SURFACE_SWIZZLING);
3454
Daniel Vetter11782b02012-01-31 16:47:55 +01003455 if (IS_GEN5(dev))
3456 return;
3457
Daniel Vetterf691e2f2012-02-02 09:58:12 +01003458 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
3459 if (IS_GEN6(dev))
Daniel Vetter6b26c862012-04-24 14:04:12 +02003460 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
Daniel Vetterf691e2f2012-02-02 09:58:12 +01003461 else
Daniel Vetter6b26c862012-04-24 14:04:12 +02003462 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
Daniel Vetterf691e2f2012-02-02 09:58:12 +01003463}
Daniel Vettere21af882012-02-09 20:53:27 +01003464
void i915_gem_init_ppgtt(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t pd_offset;
	struct intel_ring_buffer *ring;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	uint32_t __iomem *pd_addr;
	uint32_t pd_entry;
	int i;

	if (!dev_priv->mm.aliasing_ppgtt)
		return;

	pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;

		if (dev_priv->mm.gtt->needs_dmar)
			pt_addr = ppgtt->pt_dma_addr[i];
		else
			pt_addr = page_to_phys(ppgtt->pt_pages[i]);

		pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
		pd_entry |= GEN6_PDE_VALID;

		writel(pd_entry, pd_addr + i);
	}
	readl(pd_addr);

	pd_offset = ppgtt->pd_offset;
	pd_offset /= 64; /* in cachelines */
	pd_offset <<= 16;

	if (INTEL_INFO(dev)->gen == 6) {
		uint32_t ecochk, gab_ctl, ecobits;

		ecobits = I915_READ(GAC_ECO_BITS);
		I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);

		gab_ctl = I915_READ(GAB_CTL);
		I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

		ecochk = I915_READ(GAM_ECOCHK);
		I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
				       ECOCHK_PPGTT_CACHE64B);
		I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	} else if (INTEL_INFO(dev)->gen >= 7) {
		I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
		/* GFX_MODE is per-ring on gen7+ */
	}

	for (i = 0; i < I915_NUM_RINGS; i++) {
		ring = &dev_priv->ring[i];

		if (INTEL_INFO(dev)->gen >= 7)
			I915_WRITE(RING_MODE_GEN7(ring),
				   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));

		I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
		I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
	}
}

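/*
 * Bring the GPU back up after load, reset or resume: program swizzling,
 * initialize the render ring plus any BSD/BLT rings the hardware has, and
 * re-establish the aliasing PPGTT mappings.
 */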
int
i915_gem_init_hw(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	i915_gem_init_swizzling(dev);

	ret = intel_init_render_ring_buffer(dev);
	if (ret)
		return ret;

	if (HAS_BSD(dev)) {
		ret = intel_init_bsd_ring_buffer(dev);
		if (ret)
			goto cleanup_render_ring;
	}

	if (HAS_BLT(dev)) {
		ret = intel_init_blt_ring_buffer(dev);
		if (ret)
			goto cleanup_bsd_ring;
	}

	dev_priv->next_seqno = 1;

	i915_gem_init_ppgtt(dev);

	return 0;

cleanup_bsd_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
cleanup_render_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
	return ret;
}

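/*
 * Honour the i915_enable_ppgtt module parameter when it is set; otherwise
 * default to enabling PPGTT, except on gen6 when VT-d is active.
 */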
static bool
intel_enable_ppgtt(struct drm_device *dev)
{
	if (i915_enable_ppgtt >= 0)
		return i915_enable_ppgtt;

#ifdef CONFIG_INTEL_IOMMU
	/* Disable ppgtt on SNB if VT-d is on. */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}

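/*
 * One-time GEM initialization: carve up the global GTT, reserving room for
 * the aliasing PPGTT page directory when PPGTT is enabled, then bring up
 * the hardware rings.
 */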
int i915_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long gtt_size, mappable_size;
	int ret;

	gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
	mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

	mutex_lock(&dev->struct_mutex);
	if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
		/* PPGTT pdes are stolen from global gtt ptes, so shrink the
		 * aperture accordingly when using aliasing ppgtt. */
		gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;

		i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);

		ret = i915_gem_init_aliasing_ppgtt(dev);
		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	} else {
		/* Let GEM manage all of the aperture.
		 *
		 * However, leave one page at the end still bound to the scratch
		 * page. There are a number of places where the hardware
		 * apparently prefetches past the end of the object, and we've
		 * seen multiple hangs with the GPU head pointer stuck in a
		 * batchbuffer bound at the last page of the aperture. One page
		 * should be enough to keep any prefetching inside of the
		 * aperture.
		 */
		i915_gem_init_global_gtt(dev, 0, mappable_size,
					 gtt_size);
	}

	ret = i915_gem_init_hw(dev);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		i915_gem_cleanup_aliasing_ppgtt(dev);
		return ret;
	}

	/* Allow hardware batchbuffers unless told otherwise. */
	dev_priv->allow_batchbuffer = 1;
	return 0;
}

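/* Tear down every ring that was initialized by i915_gem_init_hw(). */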
void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < I915_NUM_RINGS; i++)
		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
}

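/*
 * UMS only: re-enable GEM when userspace regains the VT, clearing any
 * wedged state, reinitializing the rings and installing the IRQ handler.
 */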
int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	if (atomic_read(&dev_priv->mm.wedged)) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
		atomic_set(&dev_priv->mm.wedged, 0);
	}

	mutex_lock(&dev->struct_mutex);
	dev_priv->mm.suspended = 0;

	ret = i915_gem_init_hw(dev);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	BUG_ON(!list_empty(&dev_priv->mm.active_list));
	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
	for (i = 0; i < I915_NUM_RINGS; i++) {
		BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
		BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
	}
	mutex_unlock(&dev->struct_mutex);

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_ringbuffer;

	return 0;

cleanup_ringbuffer:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	dev_priv->mm.suspended = 1;
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

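/* UMS only: quiesce the GPU and remove the IRQ handler when the VT is released. */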
int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	drm_irq_uninstall(dev);
	return i915_gem_idle(dev);
}

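/* Idle the hardware when the last DRM client closes; a no-op under KMS. */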
void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ret = i915_gem_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
}

static void
init_ring_lists(struct intel_ring_buffer *ring)
{
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);
}

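/*
 * Set up the software state GEM needs for a device: memory-management
 * lists, fence registers, the retire work handler and the inactive-list
 * shrinker.
 */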
void
i915_gem_load(struct drm_device *dev)
{
	int i;
	drm_i915_private_t *dev_priv = dev->dev_private;

	INIT_LIST_HEAD(&dev_priv->mm.active_list);
	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
	for (i = 0; i < I915_NUM_RINGS; i++)
		init_ring_lists(&dev_priv->ring[i]);
	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	init_completion(&dev_priv->error_completion);

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	if (IS_GEN3(dev)) {
		u32 tmp = I915_READ(MI_ARB_STATE);
		if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
			/* arb state is a masked write, so set bit + bit in mask */
			tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
			I915_WRITE(MI_ARB_STATE, tmp);
		}
	}

	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

	/* Old X drivers will take 0-2 for front, back, depth buffers */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev_priv->fence_reg_start = 3;

	if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	/* Initialize fence registers to zero */
	i915_gem_reset_fences(dev);

	i915_gem_detect_bit_6_swizzle(dev);
	init_waitqueue_head(&dev_priv->pending_flip_queue);

	dev_priv->mm.interruptible = true;

	dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
	dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&dev_priv->mm.inactive_shrinker);
}

/*
 * Create a physically contiguous memory object for this object
 * e.g. for cursor + overlay regs
 */
static int i915_gem_init_phys_object(struct drm_device *dev,
				     int id, int size, int align)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;
	int ret;

	if (dev_priv->mm.phys_objs[id - 1] || !size)
		return 0;

	phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
	if (!phys_obj)
		return -ENOMEM;

	phys_obj->id = id;

	phys_obj->handle = drm_pci_alloc(dev, size, align);
	if (!phys_obj->handle) {
		ret = -ENOMEM;
		goto kfree_obj;
	}
#ifdef CONFIG_X86
	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif

	dev_priv->mm.phys_objs[id - 1] = phys_obj;

	return 0;
kfree_obj:
	kfree(phys_obj);
	return ret;
}

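/* Free a physically contiguous object, detaching any GEM object still bound to it. */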
static void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;

	if (!dev_priv->mm.phys_objs[id - 1])
		return;

	phys_obj = dev_priv->mm.phys_objs[id - 1];
	if (phys_obj->cur_obj) {
		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
	}

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif
	drm_pci_free(dev, phys_obj->handle);
	kfree(phys_obj);
	dev_priv->mm.phys_objs[id - 1] = NULL;
}

void i915_gem_free_all_phys_object(struct drm_device *dev)
{
	int i;

	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
		i915_gem_free_phys_object(dev, i);
}

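/*
 * Copy the contents of a phys object back into the GEM object's shmem
 * pages and drop the binding.
 */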
void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	char *vaddr;
	int i;
	int page_count;

	if (!obj->phys_obj)
		return;
	vaddr = obj->phys_obj->handle->vaddr;

	page_count = obj->base.size / PAGE_SIZE;
	for (i = 0; i < page_count; i++) {
		struct page *page = shmem_read_mapping_page(mapping, i);
		if (!IS_ERR(page)) {
			char *dst = kmap_atomic(page);
			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
			kunmap_atomic(dst);

			drm_clflush_pages(&page, 1);

			set_page_dirty(page);
			mark_page_accessed(page);
			page_cache_release(page);
		}
	}
	intel_gtt_chipset_flush();

	obj->phys_obj->cur_obj = NULL;
	obj->phys_obj = NULL;
}

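/*
 * Bind a GEM object to a phys object, allocating the contiguous backing
 * store on demand and copying the current shmem contents into it.
 */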
int
i915_gem_attach_phys_object(struct drm_device *dev,
			    struct drm_i915_gem_object *obj,
			    int id,
			    int align)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = 0;
	int page_count;
	int i;

	if (id > I915_MAX_PHYS_OBJECT)
		return -EINVAL;

	if (obj->phys_obj) {
		if (obj->phys_obj->id == id)
			return 0;
		i915_gem_detach_phys_object(dev, obj);
	}

	/* create a new object */
	if (!dev_priv->mm.phys_objs[id - 1]) {
		ret = i915_gem_init_phys_object(dev, id,
						obj->base.size, align);
		if (ret) {
			DRM_ERROR("failed to init phys object %d size: %zu\n",
				  id, obj->base.size);
			return ret;
		}
	}

	/* bind to the object */
	obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
	obj->phys_obj->cur_obj = obj;

	page_count = obj->base.size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		struct page *page;
		char *dst, *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			return PTR_ERR(page);

		src = kmap_atomic(page);
		dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src);

		mark_page_accessed(page);
		page_cache_release(page);
	}

	return 0;
}

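/*
 * pwrite fast path for objects backed by a phys object: copy user data
 * straight into the contiguous allocation, dropping struct_mutex if the
 * atomic copy faults, then flush the chipset write buffers.
 */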
static int
i915_gem_phys_pwrite(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
	char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;

	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten)
			return -EFAULT;
	}

	intel_gtt_chipset_flush();
	return 0;
}

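/* Called on file close: orphan any requests still owned by this client. */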
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	while (!list_empty(&file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}

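/* The GPU counts as active while any objects remain on the flushing or active lists. */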
static int
i915_gpu_is_active(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int lists_empty;

	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
		      list_empty(&dev_priv->mm.active_list);

	return !lists_empty;
}

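/*
 * Shrinker callback: with nr_to_scan == 0 just report how many inactive
 * objects could be reclaimed; otherwise unbind purgeable buffers first,
 * then other inactive buffers, idling the GPU as a last resort.
 */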
static int
i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker,
			     struct drm_i915_private,
			     mm.inactive_shrinker);
	struct drm_device *dev = dev_priv->dev;
	struct drm_i915_gem_object *obj, *next;
	int nr_to_scan = sc->nr_to_scan;
	int cnt;

	if (!mutex_trylock(&dev->struct_mutex))
		return 0;

	/* "fast-path" to count number of available objects */
	if (nr_to_scan == 0) {
		cnt = 0;
		list_for_each_entry(obj,
				    &dev_priv->mm.inactive_list,
				    mm_list)
			cnt++;
		mutex_unlock(&dev->struct_mutex);
		return cnt / 100 * sysctl_vfs_cache_pressure;
	}

rescan:
	/* first scan for clean buffers */
	i915_gem_retire_requests(dev);

	list_for_each_entry_safe(obj, next,
				 &dev_priv->mm.inactive_list,
				 mm_list) {
		if (i915_gem_object_is_purgeable(obj)) {
			if (i915_gem_object_unbind(obj) == 0 &&
			    --nr_to_scan == 0)
				break;
		}
	}

	/* second pass, evict/count anything still on the inactive list */
	cnt = 0;
	list_for_each_entry_safe(obj, next,
				 &dev_priv->mm.inactive_list,
				 mm_list) {
		if (nr_to_scan &&
		    i915_gem_object_unbind(obj) == 0)
			nr_to_scan--;
		else
			cnt++;
	}

	if (nr_to_scan && i915_gpu_is_active(dev)) {
		/*
		 * We are desperate for pages, so as a last resort, wait
		 * for the GPU to finish and discard whatever we can.
		 * This has a dramatic impact to reduce the number of
		 * OOM-killer events whilst running the GPU aggressively.
		 */
		if (i915_gpu_idle(dev, true) == 0)
			goto rescan;
	}
	mutex_unlock(&dev->struct_mutex);
	return cnt / 100 * sysctl_vfs_cache_pressure;
}