| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1 | /* | 
|  | 2 | * Copyright © 2008 Intel Corporation | 
|  | 3 | * | 
|  | 4 | * Permission is hereby granted, free of charge, to any person obtaining a | 
|  | 5 | * copy of this software and associated documentation files (the "Software"), | 
|  | 6 | * to deal in the Software without restriction, including without limitation | 
|  | 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | 
|  | 8 | * and/or sell copies of the Software, and to permit persons to whom the | 
|  | 9 | * Software is furnished to do so, subject to the following conditions: | 
|  | 10 | * | 
|  | 11 | * The above copyright notice and this permission notice (including the next | 
|  | 12 | * paragraph) shall be included in all copies or substantial portions of the | 
|  | 13 | * Software. | 
|  | 14 | * | 
|  | 15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | 
|  | 16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | 
|  | 17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL | 
|  | 18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | 
|  | 19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | 
|  | 20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | 
|  | 21 | * IN THE SOFTWARE. | 
|  | 22 | * | 
|  | 23 | * Authors: | 
|  | 24 | *    Eric Anholt <eric@anholt.net> | 
|  | 25 | * | 
|  | 26 | */ | 
|  | 27 |  | 
|  | 28 | #include "drmP.h" | 
|  | 29 | #include "drm.h" | 
|  | 30 | #include "i915_drm.h" | 
|  | 31 | #include "i915_drv.h" | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 32 | #include "i915_trace.h" | 
| Jesse Barnes | 652c393 | 2009-08-17 13:31:43 -0700 | [diff] [blame] | 33 | #include "intel_drv.h" | 
| Tejun Heo | 5a0e3ad | 2010-03-24 17:04:11 +0900 | [diff] [blame] | 34 | #include <linux/slab.h> | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 35 | #include <linux/swap.h> | 
| Jesse Barnes | 79e5394 | 2008-11-07 14:24:08 -0800 | [diff] [blame] | 36 | #include <linux/pci.h> | 
| Zhenyu Wang | f8f235e | 2010-08-27 11:08:57 +0800 | [diff] [blame] | 37 | #include <linux/intel-gtt.h> | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 38 |  | 
| Chris Wilson | 0f8c6d7 | 2010-11-01 12:38:44 +0000 | [diff] [blame] | 39 | struct change_domains { | 
|  | 40 | uint32_t invalidate_domains; | 
|  | 41 | uint32_t flush_domains; | 
|  | 42 | uint32_t flush_rings; | 
|  | 43 | }; | 
|  | 44 |  | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 45 | static uint32_t i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj_priv); | 
|  | 46 | static uint32_t i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv); | 
| Daniel Vetter | ba3d8d7 | 2010-02-11 22:37:04 +0100 | [diff] [blame] | 47 |  | 
|  | 48 | static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, | 
|  | 49 | bool pipelined); | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 50 | static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); | 
|  | 51 | static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 52 | static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, | 
|  | 53 | int write); | 
|  | 54 | static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, | 
|  | 55 | uint64_t offset, | 
|  | 56 | uint64_t size); | 
|  | 57 | static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj); | 
| Chris Wilson | 2cf34d7 | 2010-09-14 13:03:28 +0100 | [diff] [blame] | 58 | static int i915_gem_object_wait_rendering(struct drm_gem_object *obj, | 
|  | 59 | bool interruptible); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 60 | static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 61 | unsigned alignment, | 
| Daniel Vetter | 75e9e91 | 2010-11-04 17:11:09 +0100 | [diff] [blame^] | 62 | bool map_and_fenceable); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 63 | static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 64 | static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, | 
|  | 65 | struct drm_i915_gem_pwrite *args, | 
|  | 66 | struct drm_file *file_priv); | 
| Chris Wilson | be72615 | 2010-07-23 23:18:50 +0100 | [diff] [blame] | 67 | static void i915_gem_free_object_tail(struct drm_gem_object *obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 68 |  | 
| Chris Wilson | 17250b7 | 2010-10-28 12:51:39 +0100 | [diff] [blame] | 69 | static int i915_gem_inactive_shrink(struct shrinker *shrinker, | 
|  | 70 | int nr_to_scan, | 
|  | 71 | gfp_t gfp_mask); | 
|  | 72 |  | 
| Chris Wilson | 3116971 | 2009-09-14 16:50:28 +0100 | [diff] [blame] | 73 |  | 
| Chris Wilson | 73aa808 | 2010-09-30 11:46:12 +0100 | [diff] [blame] | 74 | /* some bookkeeping */ | 
|  | 75 | static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, | 
|  | 76 | size_t size) | 
|  | 77 | { | 
|  | 78 | dev_priv->mm.object_count++; | 
|  | 79 | dev_priv->mm.object_memory += size; | 
|  | 80 | } | 
|  | 81 |  | 
|  | 82 | static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, | 
|  | 83 | size_t size) | 
|  | 84 | { | 
|  | 85 | dev_priv->mm.object_count--; | 
|  | 86 | dev_priv->mm.object_memory -= size; | 
|  | 87 | } | 
|  | 88 |  | 
|  | 89 | static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv, | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 90 | struct drm_i915_gem_object *obj) | 
| Chris Wilson | 73aa808 | 2010-09-30 11:46:12 +0100 | [diff] [blame] | 91 | { | 
|  | 92 | dev_priv->mm.gtt_count++; | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 93 | dev_priv->mm.gtt_memory += obj->gtt_space->size; | 
|  | 94 | if (obj->gtt_offset < dev_priv->mm.gtt_mappable_end) { | 
| Daniel Vetter | fb7d516 | 2010-10-01 22:05:20 +0200 | [diff] [blame] | 95 | dev_priv->mm.mappable_gtt_used += | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 96 | min_t(size_t, obj->gtt_space->size, | 
|  | 97 | dev_priv->mm.gtt_mappable_end - obj->gtt_offset); | 
| Daniel Vetter | fb7d516 | 2010-10-01 22:05:20 +0200 | [diff] [blame] | 98 | } | 
| Chris Wilson | 73aa808 | 2010-09-30 11:46:12 +0100 | [diff] [blame] | 99 | } | 
|  | 100 |  | 
|  | 101 | static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv, | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 102 | struct drm_i915_gem_object *obj) | 
| Chris Wilson | 73aa808 | 2010-09-30 11:46:12 +0100 | [diff] [blame] | 103 | { | 
|  | 104 | dev_priv->mm.gtt_count--; | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 105 | dev_priv->mm.gtt_memory -= obj->gtt_space->size; | 
|  | 106 | if (obj->gtt_offset < dev_priv->mm.gtt_mappable_end) { | 
| Daniel Vetter | fb7d516 | 2010-10-01 22:05:20 +0200 | [diff] [blame] | 107 | dev_priv->mm.mappable_gtt_used -= | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 108 | min_t(size_t, obj->gtt_space->size, | 
|  | 109 | dev_priv->mm.gtt_mappable_end - obj->gtt_offset); | 
| Daniel Vetter | fb7d516 | 2010-10-01 22:05:20 +0200 | [diff] [blame] | 110 | } | 
|  | 111 | } | 
|  | 112 |  | 
|  | 113 | /** | 
|  | 114 | * Update the mappable working set counters. Call _only_ when there is a change | 
|  | 115 | * in one of (pin|fault)_mappable and update *_mappable _before_ calling. | 
|  | 116 | * @mappable: new state the changed mappable flag (either pin_ or fault_). | 
|  | 117 | */ | 
|  | 118 | static void | 
|  | 119 | i915_gem_info_update_mappable(struct drm_i915_private *dev_priv, | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 120 | struct drm_i915_gem_object *obj, | 
| Daniel Vetter | fb7d516 | 2010-10-01 22:05:20 +0200 | [diff] [blame] | 121 | bool mappable) | 
|  | 122 | { | 
| Daniel Vetter | fb7d516 | 2010-10-01 22:05:20 +0200 | [diff] [blame] | 123 | if (mappable) { | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 124 | if (obj->pin_mappable && obj->fault_mappable) | 
| Daniel Vetter | fb7d516 | 2010-10-01 22:05:20 +0200 | [diff] [blame] | 125 | /* Combined state was already mappable. */ | 
|  | 126 | return; | 
|  | 127 | dev_priv->mm.gtt_mappable_count++; | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 128 | dev_priv->mm.gtt_mappable_memory += obj->gtt_space->size; | 
| Daniel Vetter | fb7d516 | 2010-10-01 22:05:20 +0200 | [diff] [blame] | 129 | } else { | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 130 | if (obj->pin_mappable || obj->fault_mappable) | 
| Daniel Vetter | fb7d516 | 2010-10-01 22:05:20 +0200 | [diff] [blame] | 131 | /* Combined state still mappable. */ | 
|  | 132 | return; | 
|  | 133 | dev_priv->mm.gtt_mappable_count--; | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 134 | dev_priv->mm.gtt_mappable_memory -= obj->gtt_space->size; | 
| Daniel Vetter | fb7d516 | 2010-10-01 22:05:20 +0200 | [diff] [blame] | 135 | } | 
| Chris Wilson | 73aa808 | 2010-09-30 11:46:12 +0100 | [diff] [blame] | 136 | } | 
|  | 137 |  | 
|  | 138 | static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv, | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 139 | struct drm_i915_gem_object *obj, | 
| Daniel Vetter | fb7d516 | 2010-10-01 22:05:20 +0200 | [diff] [blame] | 140 | bool mappable) | 
| Chris Wilson | 73aa808 | 2010-09-30 11:46:12 +0100 | [diff] [blame] | 141 | { | 
|  | 142 | dev_priv->mm.pin_count++; | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 143 | dev_priv->mm.pin_memory += obj->gtt_space->size; | 
| Daniel Vetter | fb7d516 | 2010-10-01 22:05:20 +0200 | [diff] [blame] | 144 | if (mappable) { | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 145 | obj->pin_mappable = true; | 
| Daniel Vetter | fb7d516 | 2010-10-01 22:05:20 +0200 | [diff] [blame] | 146 | i915_gem_info_update_mappable(dev_priv, obj, true); | 
|  | 147 | } | 
| Chris Wilson | 73aa808 | 2010-09-30 11:46:12 +0100 | [diff] [blame] | 148 | } | 
|  | 149 |  | 
|  | 150 | static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv, | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 151 | struct drm_i915_gem_object *obj) | 
| Chris Wilson | 73aa808 | 2010-09-30 11:46:12 +0100 | [diff] [blame] | 152 | { | 
|  | 153 | dev_priv->mm.pin_count--; | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 154 | dev_priv->mm.pin_memory -= obj->gtt_space->size; | 
|  | 155 | if (obj->pin_mappable) { | 
|  | 156 | obj->pin_mappable = false; | 
| Daniel Vetter | fb7d516 | 2010-10-01 22:05:20 +0200 | [diff] [blame] | 157 | i915_gem_info_update_mappable(dev_priv, obj, false); | 
|  | 158 | } | 
| Chris Wilson | 73aa808 | 2010-09-30 11:46:12 +0100 | [diff] [blame] | 159 | } | 
|  | 160 |  | 
| Chris Wilson | 30dbf0c | 2010-09-25 10:19:17 +0100 | [diff] [blame] | 161 | int | 
|  | 162 | i915_gem_check_is_wedged(struct drm_device *dev) | 
|  | 163 | { | 
|  | 164 | struct drm_i915_private *dev_priv = dev->dev_private; | 
|  | 165 | struct completion *x = &dev_priv->error_completion; | 
|  | 166 | unsigned long flags; | 
|  | 167 | int ret; | 
|  | 168 |  | 
|  | 169 | if (!atomic_read(&dev_priv->mm.wedged)) | 
|  | 170 | return 0; | 
|  | 171 |  | 
|  | 172 | ret = wait_for_completion_interruptible(x); | 
|  | 173 | if (ret) | 
|  | 174 | return ret; | 
|  | 175 |  | 
|  | 176 | /* Success, we reset the GPU! */ | 
|  | 177 | if (!atomic_read(&dev_priv->mm.wedged)) | 
|  | 178 | return 0; | 
|  | 179 |  | 
|  | 180 | /* GPU is hung, bump the completion count to account for | 
|  | 181 | * the token we just consumed so that we never hit zero and | 
|  | 182 | * end up waiting upon a subsequent completion event that | 
|  | 183 | * will never happen. | 
|  | 184 | */ | 
|  | 185 | spin_lock_irqsave(&x->wait.lock, flags); | 
|  | 186 | x->done++; | 
|  | 187 | spin_unlock_irqrestore(&x->wait.lock, flags); | 
|  | 188 | return -EIO; | 
|  | 189 | } | 
|  | 190 |  | 
| Chris Wilson | 76c1dec | 2010-09-25 11:22:51 +0100 | [diff] [blame] | 191 | static int i915_mutex_lock_interruptible(struct drm_device *dev) | 
|  | 192 | { | 
|  | 193 | struct drm_i915_private *dev_priv = dev->dev_private; | 
|  | 194 | int ret; | 
|  | 195 |  | 
|  | 196 | ret = i915_gem_check_is_wedged(dev); | 
|  | 197 | if (ret) | 
|  | 198 | return ret; | 
|  | 199 |  | 
|  | 200 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 
|  | 201 | if (ret) | 
|  | 202 | return ret; | 
|  | 203 |  | 
|  | 204 | if (atomic_read(&dev_priv->mm.wedged)) { | 
|  | 205 | mutex_unlock(&dev->struct_mutex); | 
|  | 206 | return -EAGAIN; | 
|  | 207 | } | 
|  | 208 |  | 
| Chris Wilson | 23bc598 | 2010-09-29 16:10:57 +0100 | [diff] [blame] | 209 | WARN_ON(i915_verify_lists(dev)); | 
| Chris Wilson | 76c1dec | 2010-09-25 11:22:51 +0100 | [diff] [blame] | 210 | return 0; | 
|  | 211 | } | 
| Chris Wilson | 30dbf0c | 2010-09-25 10:19:17 +0100 | [diff] [blame] | 212 |  | 
| Chris Wilson | 7d1c480 | 2010-08-07 21:45:03 +0100 | [diff] [blame] | 213 | static inline bool | 
|  | 214 | i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv) | 
|  | 215 | { | 
|  | 216 | return obj_priv->gtt_space && | 
|  | 217 | !obj_priv->active && | 
|  | 218 | obj_priv->pin_count == 0; | 
|  | 219 | } | 
|  | 220 |  | 
| Chris Wilson | 73aa808 | 2010-09-30 11:46:12 +0100 | [diff] [blame] | 221 | int i915_gem_do_init(struct drm_device *dev, | 
|  | 222 | unsigned long start, | 
| Daniel Vetter | 5398463 | 2010-09-22 23:44:24 +0200 | [diff] [blame] | 223 | unsigned long mappable_end, | 
| Jesse Barnes | 79e5394 | 2008-11-07 14:24:08 -0800 | [diff] [blame] | 224 | unsigned long end) | 
|  | 225 | { | 
|  | 226 | drm_i915_private_t *dev_priv = dev->dev_private; | 
|  | 227 |  | 
|  | 228 | if (start >= end || | 
|  | 229 | (start & (PAGE_SIZE - 1)) != 0 || | 
|  | 230 | (end & (PAGE_SIZE - 1)) != 0) { | 
|  | 231 | return -EINVAL; | 
|  | 232 | } | 
|  | 233 |  | 
|  | 234 | drm_mm_init(&dev_priv->mm.gtt_space, start, | 
|  | 235 | end - start); | 
|  | 236 |  | 
| Chris Wilson | 73aa808 | 2010-09-30 11:46:12 +0100 | [diff] [blame] | 237 | dev_priv->mm.gtt_total = end - start; | 
| Daniel Vetter | fb7d516 | 2010-10-01 22:05:20 +0200 | [diff] [blame] | 238 | dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start; | 
| Daniel Vetter | 5398463 | 2010-09-22 23:44:24 +0200 | [diff] [blame] | 239 | dev_priv->mm.gtt_mappable_end = mappable_end; | 
| Jesse Barnes | 79e5394 | 2008-11-07 14:24:08 -0800 | [diff] [blame] | 240 |  | 
|  | 241 | return 0; | 
|  | 242 | } | 
| Keith Packard | 6dbe277 | 2008-10-14 21:41:13 -0700 | [diff] [blame] | 243 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 244 | int | 
|  | 245 | i915_gem_init_ioctl(struct drm_device *dev, void *data, | 
|  | 246 | struct drm_file *file_priv) | 
|  | 247 | { | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 248 | struct drm_i915_gem_init *args = data; | 
| Jesse Barnes | 79e5394 | 2008-11-07 14:24:08 -0800 | [diff] [blame] | 249 | int ret; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 250 |  | 
|  | 251 | mutex_lock(&dev->struct_mutex); | 
| Daniel Vetter | 5398463 | 2010-09-22 23:44:24 +0200 | [diff] [blame] | 252 | ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 253 | mutex_unlock(&dev->struct_mutex); | 
|  | 254 |  | 
| Jesse Barnes | 79e5394 | 2008-11-07 14:24:08 -0800 | [diff] [blame] | 255 | return ret; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 256 | } | 
|  | 257 |  | 
| Eric Anholt | 5a125c3 | 2008-10-22 21:40:13 -0700 | [diff] [blame] | 258 | int | 
|  | 259 | i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, | 
|  | 260 | struct drm_file *file_priv) | 
|  | 261 | { | 
| Chris Wilson | 73aa808 | 2010-09-30 11:46:12 +0100 | [diff] [blame] | 262 | struct drm_i915_private *dev_priv = dev->dev_private; | 
| Eric Anholt | 5a125c3 | 2008-10-22 21:40:13 -0700 | [diff] [blame] | 263 | struct drm_i915_gem_get_aperture *args = data; | 
| Eric Anholt | 5a125c3 | 2008-10-22 21:40:13 -0700 | [diff] [blame] | 264 |  | 
|  | 265 | if (!(dev->driver->driver_features & DRIVER_GEM)) | 
|  | 266 | return -ENODEV; | 
|  | 267 |  | 
| Chris Wilson | 73aa808 | 2010-09-30 11:46:12 +0100 | [diff] [blame] | 268 | mutex_lock(&dev->struct_mutex); | 
|  | 269 | args->aper_size = dev_priv->mm.gtt_total; | 
|  | 270 | args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory; | 
|  | 271 | mutex_unlock(&dev->struct_mutex); | 
| Eric Anholt | 5a125c3 | 2008-10-22 21:40:13 -0700 | [diff] [blame] | 272 |  | 
|  | 273 | return 0; | 
|  | 274 | } | 
|  | 275 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 276 |  | 
|  | 277 | /** | 
|  | 278 | * Creates a new mm object and returns a handle to it. | 
|  | 279 | */ | 
|  | 280 | int | 
|  | 281 | i915_gem_create_ioctl(struct drm_device *dev, void *data, | 
|  | 282 | struct drm_file *file_priv) | 
|  | 283 | { | 
|  | 284 | struct drm_i915_gem_create *args = data; | 
|  | 285 | struct drm_gem_object *obj; | 
| Pekka Paalanen | a1a2d1d | 2009-08-23 12:40:55 +0300 | [diff] [blame] | 286 | int ret; | 
|  | 287 | u32 handle; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 288 |  | 
|  | 289 | args->size = roundup(args->size, PAGE_SIZE); | 
|  | 290 |  | 
|  | 291 | /* Allocate the new object */ | 
| Daniel Vetter | ac52bc5 | 2010-04-09 19:05:06 +0000 | [diff] [blame] | 292 | obj = i915_gem_alloc_object(dev, args->size); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 293 | if (obj == NULL) | 
|  | 294 | return -ENOMEM; | 
|  | 295 |  | 
|  | 296 | ret = drm_gem_handle_create(file_priv, obj, &handle); | 
| Chris Wilson | 1dfd975 | 2010-09-06 14:44:14 +0100 | [diff] [blame] | 297 | if (ret) { | 
| Chris Wilson | 202f2fe | 2010-10-14 13:20:40 +0100 | [diff] [blame] | 298 | drm_gem_object_release(obj); | 
|  | 299 | i915_gem_info_remove_obj(dev->dev_private, obj->size); | 
|  | 300 | kfree(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 301 | return ret; | 
| Chris Wilson | 1dfd975 | 2010-09-06 14:44:14 +0100 | [diff] [blame] | 302 | } | 
|  | 303 |  | 
| Chris Wilson | 202f2fe | 2010-10-14 13:20:40 +0100 | [diff] [blame] | 304 | /* drop reference from allocate - handle holds it now */ | 
|  | 305 | drm_gem_object_unreference(obj); | 
|  | 306 | trace_i915_gem_object_create(obj); | 
|  | 307 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 308 | args->handle = handle; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 309 | return 0; | 
|  | 310 | } | 
|  | 311 |  | 
| Eric Anholt | 280b713 | 2009-03-12 16:56:27 -0700 | [diff] [blame] | 312 | static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) | 
|  | 313 | { | 
|  | 314 | drm_i915_private_t *dev_priv = obj->dev->dev_private; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 315 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Eric Anholt | 280b713 | 2009-03-12 16:56:27 -0700 | [diff] [blame] | 316 |  | 
|  | 317 | return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && | 
|  | 318 | obj_priv->tiling_mode != I915_TILING_NONE; | 
|  | 319 | } | 
|  | 320 |  | 
| Chris Wilson | 99a03df | 2010-05-27 14:15:34 +0100 | [diff] [blame] | 321 | static inline void | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 322 | slow_shmem_copy(struct page *dst_page, | 
|  | 323 | int dst_offset, | 
|  | 324 | struct page *src_page, | 
|  | 325 | int src_offset, | 
|  | 326 | int length) | 
|  | 327 | { | 
|  | 328 | char *dst_vaddr, *src_vaddr; | 
|  | 329 |  | 
| Chris Wilson | 99a03df | 2010-05-27 14:15:34 +0100 | [diff] [blame] | 330 | dst_vaddr = kmap(dst_page); | 
|  | 331 | src_vaddr = kmap(src_page); | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 332 |  | 
|  | 333 | memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length); | 
|  | 334 |  | 
| Chris Wilson | 99a03df | 2010-05-27 14:15:34 +0100 | [diff] [blame] | 335 | kunmap(src_page); | 
|  | 336 | kunmap(dst_page); | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 337 | } | 
|  | 338 |  | 
| Chris Wilson | 99a03df | 2010-05-27 14:15:34 +0100 | [diff] [blame] | 339 | static inline void | 
| Eric Anholt | 280b713 | 2009-03-12 16:56:27 -0700 | [diff] [blame] | 340 | slow_shmem_bit17_copy(struct page *gpu_page, | 
|  | 341 | int gpu_offset, | 
|  | 342 | struct page *cpu_page, | 
|  | 343 | int cpu_offset, | 
|  | 344 | int length, | 
|  | 345 | int is_read) | 
|  | 346 | { | 
|  | 347 | char *gpu_vaddr, *cpu_vaddr; | 
|  | 348 |  | 
|  | 349 | /* Use the unswizzled path if this page isn't affected. */ | 
|  | 350 | if ((page_to_phys(gpu_page) & (1 << 17)) == 0) { | 
|  | 351 | if (is_read) | 
|  | 352 | return slow_shmem_copy(cpu_page, cpu_offset, | 
|  | 353 | gpu_page, gpu_offset, length); | 
|  | 354 | else | 
|  | 355 | return slow_shmem_copy(gpu_page, gpu_offset, | 
|  | 356 | cpu_page, cpu_offset, length); | 
|  | 357 | } | 
|  | 358 |  | 
| Chris Wilson | 99a03df | 2010-05-27 14:15:34 +0100 | [diff] [blame] | 359 | gpu_vaddr = kmap(gpu_page); | 
|  | 360 | cpu_vaddr = kmap(cpu_page); | 
| Eric Anholt | 280b713 | 2009-03-12 16:56:27 -0700 | [diff] [blame] | 361 |  | 
|  | 362 | /* Copy the data, XORing A6 with A17 (1). The user already knows he's | 
|  | 363 | * XORing with the other bits (A9 for Y, A9 and A10 for X) | 
|  | 364 | */ | 
|  | 365 | while (length > 0) { | 
|  | 366 | int cacheline_end = ALIGN(gpu_offset + 1, 64); | 
|  | 367 | int this_length = min(cacheline_end - gpu_offset, length); | 
|  | 368 | int swizzled_gpu_offset = gpu_offset ^ 64; | 
|  | 369 |  | 
|  | 370 | if (is_read) { | 
|  | 371 | memcpy(cpu_vaddr + cpu_offset, | 
|  | 372 | gpu_vaddr + swizzled_gpu_offset, | 
|  | 373 | this_length); | 
|  | 374 | } else { | 
|  | 375 | memcpy(gpu_vaddr + swizzled_gpu_offset, | 
|  | 376 | cpu_vaddr + cpu_offset, | 
|  | 377 | this_length); | 
|  | 378 | } | 
|  | 379 | cpu_offset += this_length; | 
|  | 380 | gpu_offset += this_length; | 
|  | 381 | length -= this_length; | 
|  | 382 | } | 
|  | 383 |  | 
| Chris Wilson | 99a03df | 2010-05-27 14:15:34 +0100 | [diff] [blame] | 384 | kunmap(cpu_page); | 
|  | 385 | kunmap(gpu_page); | 
| Eric Anholt | 280b713 | 2009-03-12 16:56:27 -0700 | [diff] [blame] | 386 | } | 
|  | 387 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 388 | /** | 
| Eric Anholt | eb01459 | 2009-03-10 11:44:52 -0700 | [diff] [blame] | 389 | * This is the fast shmem pread path, which attempts to copy_from_user directly | 
|  | 390 | * from the backing pages of the object to the user's address space.  On a | 
|  | 391 | * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow(). | 
|  | 392 | */ | 
|  | 393 | static int | 
|  | 394 | i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, | 
|  | 395 | struct drm_i915_gem_pread *args, | 
|  | 396 | struct drm_file *file_priv) | 
|  | 397 | { | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 398 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 399 | struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; | 
| Eric Anholt | eb01459 | 2009-03-10 11:44:52 -0700 | [diff] [blame] | 400 | ssize_t remain; | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 401 | loff_t offset; | 
| Eric Anholt | eb01459 | 2009-03-10 11:44:52 -0700 | [diff] [blame] | 402 | char __user *user_data; | 
|  | 403 | int page_offset, page_length; | 
| Eric Anholt | eb01459 | 2009-03-10 11:44:52 -0700 | [diff] [blame] | 404 |  | 
|  | 405 | user_data = (char __user *) (uintptr_t) args->data_ptr; | 
|  | 406 | remain = args->size; | 
|  | 407 |  | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 408 | obj_priv = to_intel_bo(obj); | 
| Eric Anholt | eb01459 | 2009-03-10 11:44:52 -0700 | [diff] [blame] | 409 | offset = args->offset; | 
|  | 410 |  | 
|  | 411 | while (remain > 0) { | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 412 | struct page *page; | 
|  | 413 | char *vaddr; | 
|  | 414 | int ret; | 
|  | 415 |  | 
| Eric Anholt | eb01459 | 2009-03-10 11:44:52 -0700 | [diff] [blame] | 416 | /* Operation in this page | 
|  | 417 | * | 
| Eric Anholt | eb01459 | 2009-03-10 11:44:52 -0700 | [diff] [blame] | 418 | * page_offset = offset within page | 
|  | 419 | * page_length = bytes to copy for this page | 
|  | 420 | */ | 
| Eric Anholt | eb01459 | 2009-03-10 11:44:52 -0700 | [diff] [blame] | 421 | page_offset = offset & (PAGE_SIZE-1); | 
|  | 422 | page_length = remain; | 
|  | 423 | if ((page_offset + remain) > PAGE_SIZE) | 
|  | 424 | page_length = PAGE_SIZE - page_offset; | 
|  | 425 |  | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 426 | page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT, | 
|  | 427 | GFP_HIGHUSER | __GFP_RECLAIMABLE); | 
|  | 428 | if (IS_ERR(page)) | 
|  | 429 | return PTR_ERR(page); | 
|  | 430 |  | 
|  | 431 | vaddr = kmap_atomic(page); | 
|  | 432 | ret = __copy_to_user_inatomic(user_data, | 
|  | 433 | vaddr + page_offset, | 
|  | 434 | page_length); | 
|  | 435 | kunmap_atomic(vaddr); | 
|  | 436 |  | 
|  | 437 | mark_page_accessed(page); | 
|  | 438 | page_cache_release(page); | 
|  | 439 | if (ret) | 
| Chris Wilson | 4f27b75 | 2010-10-14 15:26:45 +0100 | [diff] [blame] | 440 | return -EFAULT; | 
| Eric Anholt | eb01459 | 2009-03-10 11:44:52 -0700 | [diff] [blame] | 441 |  | 
|  | 442 | remain -= page_length; | 
|  | 443 | user_data += page_length; | 
|  | 444 | offset += page_length; | 
|  | 445 | } | 
|  | 446 |  | 
| Chris Wilson | 4f27b75 | 2010-10-14 15:26:45 +0100 | [diff] [blame] | 447 | return 0; | 
| Eric Anholt | eb01459 | 2009-03-10 11:44:52 -0700 | [diff] [blame] | 448 | } | 
|  | 449 |  | 
|  | 450 | /** | 
|  | 451 | * This is the fallback shmem pread path, which allocates temporary storage | 
|  | 452 | * in kernel space to copy_to_user into outside of the struct_mutex, so we | 
|  | 453 | * can copy out of the object's backing pages while holding the struct mutex | 
|  | 454 | * and not take page faults. | 
|  | 455 | */ | 
|  | 456 | static int | 
|  | 457 | i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, | 
|  | 458 | struct drm_i915_gem_pread *args, | 
|  | 459 | struct drm_file *file_priv) | 
|  | 460 | { | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 461 | struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 462 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Eric Anholt | eb01459 | 2009-03-10 11:44:52 -0700 | [diff] [blame] | 463 | struct mm_struct *mm = current->mm; | 
|  | 464 | struct page **user_pages; | 
|  | 465 | ssize_t remain; | 
|  | 466 | loff_t offset, pinned_pages, i; | 
|  | 467 | loff_t first_data_page, last_data_page, num_pages; | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 468 | int shmem_page_offset; | 
|  | 469 | int data_page_index, data_page_offset; | 
| Eric Anholt | eb01459 | 2009-03-10 11:44:52 -0700 | [diff] [blame] | 470 | int page_length; | 
|  | 471 | int ret; | 
|  | 472 | uint64_t data_ptr = args->data_ptr; | 
| Eric Anholt | 280b713 | 2009-03-12 16:56:27 -0700 | [diff] [blame] | 473 | int do_bit17_swizzling; | 
| Eric Anholt | eb01459 | 2009-03-10 11:44:52 -0700 | [diff] [blame] | 474 |  | 
|  | 475 | remain = args->size; | 
|  | 476 |  | 
|  | 477 | /* Pin the user pages containing the data.  We can't fault while | 
|  | 478 | * holding the struct mutex, yet we want to hold it while | 
|  | 479 | * dereferencing the user data. | 
|  | 480 | */ | 
|  | 481 | first_data_page = data_ptr / PAGE_SIZE; | 
|  | 482 | last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; | 
|  | 483 | num_pages = last_data_page - first_data_page + 1; | 
|  | 484 |  | 
| Chris Wilson | 4f27b75 | 2010-10-14 15:26:45 +0100 | [diff] [blame] | 485 | user_pages = drm_malloc_ab(num_pages, sizeof(struct page *)); | 
| Eric Anholt | eb01459 | 2009-03-10 11:44:52 -0700 | [diff] [blame] | 486 | if (user_pages == NULL) | 
|  | 487 | return -ENOMEM; | 
|  | 488 |  | 
| Chris Wilson | 4f27b75 | 2010-10-14 15:26:45 +0100 | [diff] [blame] | 489 | mutex_unlock(&dev->struct_mutex); | 
| Eric Anholt | eb01459 | 2009-03-10 11:44:52 -0700 | [diff] [blame] | 490 | down_read(&mm->mmap_sem); | 
|  | 491 | pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, | 
| Eric Anholt | e5e9ecd | 2009-04-07 16:01:22 -0700 | [diff] [blame] | 492 | num_pages, 1, 0, user_pages, NULL); | 
| Eric Anholt | eb01459 | 2009-03-10 11:44:52 -0700 | [diff] [blame] | 493 | up_read(&mm->mmap_sem); | 
| Chris Wilson | 4f27b75 | 2010-10-14 15:26:45 +0100 | [diff] [blame] | 494 | mutex_lock(&dev->struct_mutex); | 
| Eric Anholt | eb01459 | 2009-03-10 11:44:52 -0700 | [diff] [blame] | 495 | if (pinned_pages < num_pages) { | 
|  | 496 | ret = -EFAULT; | 
| Chris Wilson | 4f27b75 | 2010-10-14 15:26:45 +0100 | [diff] [blame] | 497 | goto out; | 
| Eric Anholt | eb01459 | 2009-03-10 11:44:52 -0700 | [diff] [blame] | 498 | } | 
|  | 499 |  | 
| Chris Wilson | 4f27b75 | 2010-10-14 15:26:45 +0100 | [diff] [blame] | 500 | ret = i915_gem_object_set_cpu_read_domain_range(obj, | 
|  | 501 | args->offset, | 
| Eric Anholt | eb01459 | 2009-03-10 11:44:52 -0700 | [diff] [blame] | 502 | args->size); | 
| Chris Wilson | 4f27b75 | 2010-10-14 15:26:45 +0100 | [diff] [blame] | 503 | if (ret) | 
|  | 504 | goto out; | 
|  | 505 |  | 
|  | 506 | do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); | 
| Eric Anholt | eb01459 | 2009-03-10 11:44:52 -0700 | [diff] [blame] | 507 |  | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 508 | obj_priv = to_intel_bo(obj); | 
| Eric Anholt | eb01459 | 2009-03-10 11:44:52 -0700 | [diff] [blame] | 509 | offset = args->offset; | 
|  | 510 |  | 
|  | 511 | while (remain > 0) { | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 512 | struct page *page; | 
|  | 513 |  | 
| Eric Anholt | eb01459 | 2009-03-10 11:44:52 -0700 | [diff] [blame] | 514 | /* Operation in this page | 
|  | 515 | * | 
| Eric Anholt | eb01459 | 2009-03-10 11:44:52 -0700 | [diff] [blame] | 516 | * shmem_page_offset = offset within page in shmem file | 
|  | 517 | * data_page_index = page number in get_user_pages return | 
|  | 518 | * data_page_offset = offset with data_page_index page. | 
|  | 519 | * page_length = bytes to copy for this page | 
|  | 520 | */ | 
| Eric Anholt | eb01459 | 2009-03-10 11:44:52 -0700 | [diff] [blame] | 521 | shmem_page_offset = offset & ~PAGE_MASK; | 
|  | 522 | data_page_index = data_ptr / PAGE_SIZE - first_data_page; | 
|  | 523 | data_page_offset = data_ptr & ~PAGE_MASK; | 
|  | 524 |  | 
|  | 525 | page_length = remain; | 
|  | 526 | if ((shmem_page_offset + page_length) > PAGE_SIZE) | 
|  | 527 | page_length = PAGE_SIZE - shmem_page_offset; | 
|  | 528 | if ((data_page_offset + page_length) > PAGE_SIZE) | 
|  | 529 | page_length = PAGE_SIZE - data_page_offset; | 
|  | 530 |  | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 531 | page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT, | 
|  | 532 | GFP_HIGHUSER | __GFP_RECLAIMABLE); | 
|  | 533 | if (IS_ERR(page)) | 
|  | 534 | return PTR_ERR(page); | 
|  | 535 |  | 
| Eric Anholt | 280b713 | 2009-03-12 16:56:27 -0700 | [diff] [blame] | 536 | if (do_bit17_swizzling) { | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 537 | slow_shmem_bit17_copy(page, | 
| Eric Anholt | 280b713 | 2009-03-12 16:56:27 -0700 | [diff] [blame] | 538 | shmem_page_offset, | 
| Chris Wilson | 99a03df | 2010-05-27 14:15:34 +0100 | [diff] [blame] | 539 | user_pages[data_page_index], | 
|  | 540 | data_page_offset, | 
|  | 541 | page_length, | 
|  | 542 | 1); | 
|  | 543 | } else { | 
|  | 544 | slow_shmem_copy(user_pages[data_page_index], | 
|  | 545 | data_page_offset, | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 546 | page, | 
| Chris Wilson | 99a03df | 2010-05-27 14:15:34 +0100 | [diff] [blame] | 547 | shmem_page_offset, | 
|  | 548 | page_length); | 
| Eric Anholt | 280b713 | 2009-03-12 16:56:27 -0700 | [diff] [blame] | 549 | } | 
| Eric Anholt | eb01459 | 2009-03-10 11:44:52 -0700 | [diff] [blame] | 550 |  | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 551 | mark_page_accessed(page); | 
|  | 552 | page_cache_release(page); | 
|  | 553 |  | 
| Eric Anholt | eb01459 | 2009-03-10 11:44:52 -0700 | [diff] [blame] | 554 | remain -= page_length; | 
|  | 555 | data_ptr += page_length; | 
|  | 556 | offset += page_length; | 
|  | 557 | } | 
|  | 558 |  | 
| Chris Wilson | 4f27b75 | 2010-10-14 15:26:45 +0100 | [diff] [blame] | 559 | out: | 
| Eric Anholt | eb01459 | 2009-03-10 11:44:52 -0700 | [diff] [blame] | 560 | for (i = 0; i < pinned_pages; i++) { | 
|  | 561 | SetPageDirty(user_pages[i]); | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 562 | mark_page_accessed(user_pages[i]); | 
| Eric Anholt | eb01459 | 2009-03-10 11:44:52 -0700 | [diff] [blame] | 563 | page_cache_release(user_pages[i]); | 
|  | 564 | } | 
| Jesse Barnes | 8e7d2b2 | 2009-05-08 16:13:25 -0700 | [diff] [blame] | 565 | drm_free_large(user_pages); | 
| Eric Anholt | eb01459 | 2009-03-10 11:44:52 -0700 | [diff] [blame] | 566 |  | 
|  | 567 | return ret; | 
|  | 568 | } | 
|  | 569 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 570 | /** | 
|  | 571 | * Reads data from the object referenced by handle. | 
|  | 572 | * | 
|  | 573 | * On error, the contents of *data are undefined. | 
|  | 574 | */ | 
|  | 575 | int | 
|  | 576 | i915_gem_pread_ioctl(struct drm_device *dev, void *data, | 
|  | 577 | struct drm_file *file_priv) | 
|  | 578 | { | 
|  | 579 | struct drm_i915_gem_pread *args = data; | 
|  | 580 | struct drm_gem_object *obj; | 
|  | 581 | struct drm_i915_gem_object *obj_priv; | 
| Chris Wilson | 35b62a8 | 2010-09-26 20:23:38 +0100 | [diff] [blame] | 582 | int ret = 0; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 583 |  | 
| Chris Wilson | 4f27b75 | 2010-10-14 15:26:45 +0100 | [diff] [blame] | 584 | ret = i915_mutex_lock_interruptible(dev); | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 585 | if (ret) | 
| Chris Wilson | 4f27b75 | 2010-10-14 15:26:45 +0100 | [diff] [blame] | 586 | return ret; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 587 |  | 
|  | 588 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 589 | if (obj == NULL) { | 
|  | 590 | ret = -ENOENT; | 
|  | 591 | goto unlock; | 
| Chris Wilson | 4f27b75 | 2010-10-14 15:26:45 +0100 | [diff] [blame] | 592 | } | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 593 | obj_priv = to_intel_bo(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 594 |  | 
| Chris Wilson | 7dcd249 | 2010-09-26 20:21:44 +0100 | [diff] [blame] | 595 | /* Bounds check source.  */ | 
|  | 596 | if (args->offset > obj->size || args->size > obj->size - args->offset) { | 
| Chris Wilson | ce9d419 | 2010-09-26 20:50:05 +0100 | [diff] [blame] | 597 | ret = -EINVAL; | 
| Chris Wilson | 35b62a8 | 2010-09-26 20:23:38 +0100 | [diff] [blame] | 598 | goto out; | 
| Chris Wilson | ce9d419 | 2010-09-26 20:50:05 +0100 | [diff] [blame] | 599 | } | 
|  | 600 |  | 
| Chris Wilson | 35b62a8 | 2010-09-26 20:23:38 +0100 | [diff] [blame] | 601 | if (args->size == 0) | 
|  | 602 | goto out; | 
|  | 603 |  | 
| Chris Wilson | ce9d419 | 2010-09-26 20:50:05 +0100 | [diff] [blame] | 604 | if (!access_ok(VERIFY_WRITE, | 
|  | 605 | (char __user *)(uintptr_t)args->data_ptr, | 
|  | 606 | args->size)) { | 
|  | 607 | ret = -EFAULT; | 
| Chris Wilson | 35b62a8 | 2010-09-26 20:23:38 +0100 | [diff] [blame] | 608 | goto out; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 609 | } | 
|  | 610 |  | 
| Chris Wilson | b5e4feb | 2010-10-14 13:47:43 +0100 | [diff] [blame] | 611 | ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr, | 
|  | 612 | args->size); | 
|  | 613 | if (ret) { | 
|  | 614 | ret = -EFAULT; | 
|  | 615 | goto out; | 
|  | 616 | } | 
|  | 617 |  | 
| Chris Wilson | 4f27b75 | 2010-10-14 15:26:45 +0100 | [diff] [blame] | 618 | ret = i915_gem_object_set_cpu_read_domain_range(obj, | 
|  | 619 | args->offset, | 
|  | 620 | args->size); | 
|  | 621 | if (ret) | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 622 | goto out; | 
| Chris Wilson | 4f27b75 | 2010-10-14 15:26:45 +0100 | [diff] [blame] | 623 |  | 
|  | 624 | ret = -EFAULT; | 
|  | 625 | if (!i915_gem_object_needs_bit17_swizzle(obj)) | 
| Eric Anholt | 280b713 | 2009-03-12 16:56:27 -0700 | [diff] [blame] | 626 | ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv); | 
| Chris Wilson | 4f27b75 | 2010-10-14 15:26:45 +0100 | [diff] [blame] | 627 | if (ret == -EFAULT) | 
|  | 628 | ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 629 |  | 
| Chris Wilson | 35b62a8 | 2010-09-26 20:23:38 +0100 | [diff] [blame] | 630 | out: | 
| Chris Wilson | 4f27b75 | 2010-10-14 15:26:45 +0100 | [diff] [blame] | 631 | drm_gem_object_unreference(obj); | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 632 | unlock: | 
| Chris Wilson | 4f27b75 | 2010-10-14 15:26:45 +0100 | [diff] [blame] | 633 | mutex_unlock(&dev->struct_mutex); | 
| Eric Anholt | eb01459 | 2009-03-10 11:44:52 -0700 | [diff] [blame] | 634 | return ret; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 635 | } | 
|  | 636 |  | 
| Keith Packard | 0839ccb | 2008-10-30 19:38:48 -0700 | [diff] [blame] | 637 | /* This is the fast write path which cannot handle | 
|  | 638 | * page faults in the source data | 
| Linus Torvalds | 9b7530cc | 2008-10-20 14:16:43 -0700 | [diff] [blame] | 639 | */ | 
| Linus Torvalds | 9b7530cc | 2008-10-20 14:16:43 -0700 | [diff] [blame] | 640 |  | 
| Keith Packard | 0839ccb | 2008-10-30 19:38:48 -0700 | [diff] [blame] | 641 | static inline int | 
|  | 642 | fast_user_write(struct io_mapping *mapping, | 
|  | 643 | loff_t page_base, int page_offset, | 
|  | 644 | char __user *user_data, | 
|  | 645 | int length) | 
|  | 646 | { | 
|  | 647 | char *vaddr_atomic; | 
|  | 648 | unsigned long unwritten; | 
|  | 649 |  | 
| Peter Zijlstra | 3e4d3af | 2010-10-26 14:21:51 -0700 | [diff] [blame] | 650 | vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base); | 
| Keith Packard | 0839ccb | 2008-10-30 19:38:48 -0700 | [diff] [blame] | 651 | unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset, | 
|  | 652 | user_data, length); | 
| Peter Zijlstra | 3e4d3af | 2010-10-26 14:21:51 -0700 | [diff] [blame] | 653 | io_mapping_unmap_atomic(vaddr_atomic); | 
| Chris Wilson | fbd5a26 | 2010-10-14 15:03:58 +0100 | [diff] [blame] | 654 | return unwritten; | 
| Keith Packard | 0839ccb | 2008-10-30 19:38:48 -0700 | [diff] [blame] | 655 | } | 
|  | 656 |  | 
|  | 657 | /* Here's the write path which can sleep for | 
|  | 658 | * page faults | 
|  | 659 | */ | 
|  | 660 |  | 
| Chris Wilson | ab34c22 | 2010-05-27 14:15:35 +0100 | [diff] [blame] | 661 | static inline void | 
| Eric Anholt | 3de09aa | 2009-03-09 09:42:23 -0700 | [diff] [blame] | 662 | slow_kernel_write(struct io_mapping *mapping, | 
|  | 663 | loff_t gtt_base, int gtt_offset, | 
|  | 664 | struct page *user_page, int user_offset, | 
|  | 665 | int length) | 
| Keith Packard | 0839ccb | 2008-10-30 19:38:48 -0700 | [diff] [blame] | 666 | { | 
| Chris Wilson | ab34c22 | 2010-05-27 14:15:35 +0100 | [diff] [blame] | 667 | char __iomem *dst_vaddr; | 
|  | 668 | char *src_vaddr; | 
| Keith Packard | 0839ccb | 2008-10-30 19:38:48 -0700 | [diff] [blame] | 669 |  | 
| Chris Wilson | ab34c22 | 2010-05-27 14:15:35 +0100 | [diff] [blame] | 670 | dst_vaddr = io_mapping_map_wc(mapping, gtt_base); | 
|  | 671 | src_vaddr = kmap(user_page); | 
|  | 672 |  | 
|  | 673 | memcpy_toio(dst_vaddr + gtt_offset, | 
|  | 674 | src_vaddr + user_offset, | 
|  | 675 | length); | 
|  | 676 |  | 
|  | 677 | kunmap(user_page); | 
|  | 678 | io_mapping_unmap(dst_vaddr); | 
| Linus Torvalds | 9b7530cc | 2008-10-20 14:16:43 -0700 | [diff] [blame] | 679 | } | 
|  | 680 |  | 
| Eric Anholt | 3de09aa | 2009-03-09 09:42:23 -0700 | [diff] [blame] | 681 | /** | 
|  | 682 | * This is the fast pwrite path, where we copy the data directly from the | 
|  | 683 | * user into the GTT, uncached. | 
|  | 684 | */ | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 685 | static int | 
| Eric Anholt | 3de09aa | 2009-03-09 09:42:23 -0700 | [diff] [blame] | 686 | i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | 
|  | 687 | struct drm_i915_gem_pwrite *args, | 
|  | 688 | struct drm_file *file_priv) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 689 | { | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 690 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Keith Packard | 0839ccb | 2008-10-30 19:38:48 -0700 | [diff] [blame] | 691 | drm_i915_private_t *dev_priv = dev->dev_private; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 692 | ssize_t remain; | 
| Keith Packard | 0839ccb | 2008-10-30 19:38:48 -0700 | [diff] [blame] | 693 | loff_t offset, page_base; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 694 | char __user *user_data; | 
| Keith Packard | 0839ccb | 2008-10-30 19:38:48 -0700 | [diff] [blame] | 695 | int page_offset, page_length; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 696 |  | 
|  | 697 | user_data = (char __user *) (uintptr_t) args->data_ptr; | 
|  | 698 | remain = args->size; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 699 |  | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 700 | obj_priv = to_intel_bo(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 701 | offset = obj_priv->gtt_offset + args->offset; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 702 |  | 
|  | 703 | while (remain > 0) { | 
|  | 704 | /* Operation in this page | 
|  | 705 | * | 
| Keith Packard | 0839ccb | 2008-10-30 19:38:48 -0700 | [diff] [blame] | 706 | * page_base = page offset within aperture | 
|  | 707 | * page_offset = offset within page | 
|  | 708 | * page_length = bytes to copy for this page | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 709 | */ | 
| Keith Packard | 0839ccb | 2008-10-30 19:38:48 -0700 | [diff] [blame] | 710 | page_base = (offset & ~(PAGE_SIZE-1)); | 
|  | 711 | page_offset = offset & (PAGE_SIZE-1); | 
|  | 712 | page_length = remain; | 
|  | 713 | if ((page_offset + remain) > PAGE_SIZE) | 
|  | 714 | page_length = PAGE_SIZE - page_offset; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 715 |  | 
| Keith Packard | 0839ccb | 2008-10-30 19:38:48 -0700 | [diff] [blame] | 716 | /* If we get a fault while copying data, then (presumably) our | 
| Eric Anholt | 3de09aa | 2009-03-09 09:42:23 -0700 | [diff] [blame] | 717 | * source page isn't available.  Return the error and we'll | 
|  | 718 | * retry in the slow path. | 
| Keith Packard | 0839ccb | 2008-10-30 19:38:48 -0700 | [diff] [blame] | 719 | */ | 
| Chris Wilson | fbd5a26 | 2010-10-14 15:03:58 +0100 | [diff] [blame] | 720 | if (fast_user_write(dev_priv->mm.gtt_mapping, page_base, | 
|  | 721 | page_offset, user_data, page_length)) | 
|  | 722 |  | 
|  | 723 | return -EFAULT; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 724 |  | 
| Keith Packard | 0839ccb | 2008-10-30 19:38:48 -0700 | [diff] [blame] | 725 | remain -= page_length; | 
|  | 726 | user_data += page_length; | 
|  | 727 | offset += page_length; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 728 | } | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 729 |  | 
| Chris Wilson | fbd5a26 | 2010-10-14 15:03:58 +0100 | [diff] [blame] | 730 | return 0; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 731 | } | 
|  | 732 |  | 
| Eric Anholt | 3de09aa | 2009-03-09 09:42:23 -0700 | [diff] [blame] | 733 | /** | 
|  | 734 | * This is the fallback GTT pwrite path, which uses get_user_pages to pin | 
|  | 735 | * the memory and maps it using kmap_atomic for copying. | 
|  | 736 | * | 
|  | 737 | * This code resulted in x11perf -rgb10text consuming about 10% more CPU | 
|  | 738 | * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit). | 
|  | 739 | */ | 
| Eric Anholt | 3043c60 | 2008-10-02 12:24:47 -0700 | [diff] [blame] | 740 | static int | 
| Eric Anholt | 3de09aa | 2009-03-09 09:42:23 -0700 | [diff] [blame] | 741 | i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | 
|  | 742 | struct drm_i915_gem_pwrite *args, | 
|  | 743 | struct drm_file *file_priv) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 744 | { | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 745 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Eric Anholt | 3de09aa | 2009-03-09 09:42:23 -0700 | [diff] [blame] | 746 | drm_i915_private_t *dev_priv = dev->dev_private; | 
|  | 747 | ssize_t remain; | 
|  | 748 | loff_t gtt_page_base, offset; | 
|  | 749 | loff_t first_data_page, last_data_page, num_pages; | 
|  | 750 | loff_t pinned_pages, i; | 
|  | 751 | struct page **user_pages; | 
|  | 752 | struct mm_struct *mm = current->mm; | 
|  | 753 | int gtt_page_offset, data_page_offset, data_page_index, page_length; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 754 | int ret; | 
| Eric Anholt | 3de09aa | 2009-03-09 09:42:23 -0700 | [diff] [blame] | 755 | uint64_t data_ptr = args->data_ptr; | 
|  | 756 |  | 
|  | 757 | remain = args->size; | 
|  | 758 |  | 
|  | 759 | /* Pin the user pages containing the data.  We can't fault while | 
|  | 760 | * holding the struct mutex, and all of the pwrite implementations | 
|  | 761 | * want to hold it while dereferencing the user data. | 
|  | 762 | */ | 
|  | 763 | first_data_page = data_ptr / PAGE_SIZE; | 
|  | 764 | last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; | 
|  | 765 | num_pages = last_data_page - first_data_page + 1; | 
|  | 766 |  | 
| Chris Wilson | fbd5a26 | 2010-10-14 15:03:58 +0100 | [diff] [blame] | 767 | user_pages = drm_malloc_ab(num_pages, sizeof(struct page *)); | 
| Eric Anholt | 3de09aa | 2009-03-09 09:42:23 -0700 | [diff] [blame] | 768 | if (user_pages == NULL) | 
|  | 769 | return -ENOMEM; | 
|  | 770 |  | 
| Chris Wilson | fbd5a26 | 2010-10-14 15:03:58 +0100 | [diff] [blame] | 771 | mutex_unlock(&dev->struct_mutex); | 
| Eric Anholt | 3de09aa | 2009-03-09 09:42:23 -0700 | [diff] [blame] | 772 | down_read(&mm->mmap_sem); | 
|  | 773 | pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, | 
|  | 774 | num_pages, 0, 0, user_pages, NULL); | 
|  | 775 | up_read(&mm->mmap_sem); | 
| Chris Wilson | fbd5a26 | 2010-10-14 15:03:58 +0100 | [diff] [blame] | 776 | mutex_lock(&dev->struct_mutex); | 
| Eric Anholt | 3de09aa | 2009-03-09 09:42:23 -0700 | [diff] [blame] | 777 | if (pinned_pages < num_pages) { | 
|  | 778 | ret = -EFAULT; | 
|  | 779 | goto out_unpin_pages; | 
|  | 780 | } | 
|  | 781 |  | 
| Eric Anholt | 3de09aa | 2009-03-09 09:42:23 -0700 | [diff] [blame] | 782 | ret = i915_gem_object_set_to_gtt_domain(obj, 1); | 
|  | 783 | if (ret) | 
| Chris Wilson | fbd5a26 | 2010-10-14 15:03:58 +0100 | [diff] [blame] | 784 | goto out_unpin_pages; | 
| Eric Anholt | 3de09aa | 2009-03-09 09:42:23 -0700 | [diff] [blame] | 785 |  | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 786 | obj_priv = to_intel_bo(obj); | 
| Eric Anholt | 3de09aa | 2009-03-09 09:42:23 -0700 | [diff] [blame] | 787 | offset = obj_priv->gtt_offset + args->offset; | 
|  | 788 |  | 
|  | 789 | while (remain > 0) { | 
|  | 790 | /* Operation in this page | 
|  | 791 | * | 
|  | 792 | * gtt_page_base = page offset within aperture | 
|  | 793 | * gtt_page_offset = offset within page in aperture | 
|  | 794 | * data_page_index = page number in get_user_pages return | 
|  | 795 | * data_page_offset = offset with data_page_index page. | 
|  | 796 | * page_length = bytes to copy for this page | 
|  | 797 | */ | 
|  | 798 | gtt_page_base = offset & PAGE_MASK; | 
|  | 799 | gtt_page_offset = offset & ~PAGE_MASK; | 
|  | 800 | data_page_index = data_ptr / PAGE_SIZE - first_data_page; | 
|  | 801 | data_page_offset = data_ptr & ~PAGE_MASK; | 
|  | 802 |  | 
|  | 803 | page_length = remain; | 
|  | 804 | if ((gtt_page_offset + page_length) > PAGE_SIZE) | 
|  | 805 | page_length = PAGE_SIZE - gtt_page_offset; | 
|  | 806 | if ((data_page_offset + page_length) > PAGE_SIZE) | 
|  | 807 | page_length = PAGE_SIZE - data_page_offset; | 
|  | 808 |  | 
| Chris Wilson | ab34c22 | 2010-05-27 14:15:35 +0100 | [diff] [blame] | 809 | slow_kernel_write(dev_priv->mm.gtt_mapping, | 
|  | 810 | gtt_page_base, gtt_page_offset, | 
|  | 811 | user_pages[data_page_index], | 
|  | 812 | data_page_offset, | 
|  | 813 | page_length); | 
| Eric Anholt | 3de09aa | 2009-03-09 09:42:23 -0700 | [diff] [blame] | 814 |  | 
|  | 815 | remain -= page_length; | 
|  | 816 | offset += page_length; | 
|  | 817 | data_ptr += page_length; | 
|  | 818 | } | 
|  | 819 |  | 
| Eric Anholt | 3de09aa | 2009-03-09 09:42:23 -0700 | [diff] [blame] | 820 | out_unpin_pages: | 
|  | 821 | for (i = 0; i < pinned_pages; i++) | 
|  | 822 | page_cache_release(user_pages[i]); | 
| Jesse Barnes | 8e7d2b2 | 2009-05-08 16:13:25 -0700 | [diff] [blame] | 823 | drm_free_large(user_pages); | 
| Eric Anholt | 3de09aa | 2009-03-09 09:42:23 -0700 | [diff] [blame] | 824 |  | 
|  | 825 | return ret; | 
|  | 826 | } | 
|  | 827 |  | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 828 | /** | 
|  | 829 | * This is the fast shmem pwrite path, which attempts to directly | 
|  | 830 | * copy_from_user into the kmapped pages backing the object. | 
|  | 831 | */ | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 832 | static int | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 833 | i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | 
|  | 834 | struct drm_i915_gem_pwrite *args, | 
|  | 835 | struct drm_file *file_priv) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 836 | { | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 837 | struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 838 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 839 | ssize_t remain; | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 840 | loff_t offset; | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 841 | char __user *user_data; | 
|  | 842 | int page_offset, page_length; | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 843 |  | 
|  | 844 | user_data = (char __user *) (uintptr_t) args->data_ptr; | 
|  | 845 | remain = args->size; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 846 |  | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 847 | obj_priv = to_intel_bo(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 848 | offset = args->offset; | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 849 | obj_priv->dirty = 1; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 850 |  | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 851 | while (remain > 0) { | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 852 | struct page *page; | 
|  | 853 | char *vaddr; | 
|  | 854 | int ret; | 
|  | 855 |  | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 856 | /* Operation in this page | 
|  | 857 | * | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 858 | * page_offset = offset within page | 
|  | 859 | * page_length = bytes to copy for this page | 
|  | 860 | */ | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 861 | page_offset = offset & (PAGE_SIZE-1); | 
|  | 862 | page_length = remain; | 
|  | 863 | if ((page_offset + remain) > PAGE_SIZE) | 
|  | 864 | page_length = PAGE_SIZE - page_offset; | 
|  | 865 |  | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 866 | page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT, | 
|  | 867 | GFP_HIGHUSER | __GFP_RECLAIMABLE); | 
|  | 868 | if (IS_ERR(page)) | 
|  | 869 | return PTR_ERR(page); | 
|  | 870 |  | 
|  | 871 | vaddr = kmap_atomic(page, KM_USER0); | 
|  | 872 | ret = __copy_from_user_inatomic(vaddr + page_offset, | 
|  | 873 | user_data, | 
|  | 874 | page_length); | 
|  | 875 | kunmap_atomic(vaddr, KM_USER0); | 
|  | 876 |  | 
|  | 877 | set_page_dirty(page); | 
|  | 878 | mark_page_accessed(page); | 
|  | 879 | page_cache_release(page); | 
|  | 880 |  | 
|  | 881 | /* If we get a fault while copying data, then (presumably) our | 
|  | 882 | * source page isn't available.  Return the error and we'll | 
|  | 883 | * retry in the slow path. | 
|  | 884 | */ | 
|  | 885 | if (ret) | 
| Chris Wilson | fbd5a26 | 2010-10-14 15:03:58 +0100 | [diff] [blame] | 886 | return -EFAULT; | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 887 |  | 
|  | 888 | remain -= page_length; | 
|  | 889 | user_data += page_length; | 
|  | 890 | offset += page_length; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 891 | } | 
|  | 892 |  | 
| Chris Wilson | fbd5a26 | 2010-10-14 15:03:58 +0100 | [diff] [blame] | 893 | return 0; | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 894 | } | 
|  | 895 |  | 
|  | 896 | /** | 
|  | 897 | * This is the fallback shmem pwrite path, which uses get_user_pages to pin | 
|  | 898 | * the memory and maps it using kmap_atomic for copying. | 
|  | 899 | * | 
|  | 900 | * This avoids taking mmap_sem for faulting on the user's address while the | 
|  | 901 | * struct_mutex is held. | 
|  | 902 | */ | 
|  | 903 | static int | 
|  | 904 | i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | 
|  | 905 | struct drm_i915_gem_pwrite *args, | 
|  | 906 | struct drm_file *file_priv) | 
|  | 907 | { | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 908 | struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 909 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 910 | struct mm_struct *mm = current->mm; | 
|  | 911 | struct page **user_pages; | 
|  | 912 | ssize_t remain; | 
|  | 913 | loff_t offset, pinned_pages, i; | 
|  | 914 | loff_t first_data_page, last_data_page, num_pages; | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 915 | int shmem_page_offset; | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 916 | int data_page_index,  data_page_offset; | 
|  | 917 | int page_length; | 
|  | 918 | int ret; | 
|  | 919 | uint64_t data_ptr = args->data_ptr; | 
| Eric Anholt | 280b713 | 2009-03-12 16:56:27 -0700 | [diff] [blame] | 920 | int do_bit17_swizzling; | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 921 |  | 
|  | 922 | remain = args->size; | 
|  | 923 |  | 
|  | 924 | /* Pin the user pages containing the data.  We can't fault while | 
|  | 925 | * holding the struct mutex, and all of the pwrite implementations | 
|  | 926 | * want to hold it while dereferencing the user data. | 
|  | 927 | */ | 
|  | 928 | first_data_page = data_ptr / PAGE_SIZE; | 
|  | 929 | last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; | 
|  | 930 | num_pages = last_data_page - first_data_page + 1; | 
|  | 931 |  | 
| Chris Wilson | 4f27b75 | 2010-10-14 15:26:45 +0100 | [diff] [blame] | 932 | user_pages = drm_malloc_ab(num_pages, sizeof(struct page *)); | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 933 | if (user_pages == NULL) | 
|  | 934 | return -ENOMEM; | 
|  | 935 |  | 
| Chris Wilson | fbd5a26 | 2010-10-14 15:03:58 +0100 | [diff] [blame] | 936 | mutex_unlock(&dev->struct_mutex); | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 937 | down_read(&mm->mmap_sem); | 
|  | 938 | pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, | 
|  | 939 | num_pages, 0, 0, user_pages, NULL); | 
|  | 940 | up_read(&mm->mmap_sem); | 
| Chris Wilson | fbd5a26 | 2010-10-14 15:03:58 +0100 | [diff] [blame] | 941 | mutex_lock(&dev->struct_mutex); | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 942 | if (pinned_pages < num_pages) { | 
|  | 943 | ret = -EFAULT; | 
| Chris Wilson | fbd5a26 | 2010-10-14 15:03:58 +0100 | [diff] [blame] | 944 | goto out; | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 945 | } | 
|  | 946 |  | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 947 | ret = i915_gem_object_set_to_cpu_domain(obj, 1); | 
| Chris Wilson | fbd5a26 | 2010-10-14 15:03:58 +0100 | [diff] [blame] | 948 | if (ret) | 
|  | 949 | goto out; | 
|  | 950 |  | 
|  | 951 | do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 952 |  | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 953 | obj_priv = to_intel_bo(obj); | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 954 | offset = args->offset; | 
|  | 955 | obj_priv->dirty = 1; | 
|  | 956 |  | 
|  | 957 | while (remain > 0) { | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 958 | struct page *page; | 
|  | 959 |  | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 960 | /* Operation in this page | 
|  | 961 | * | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 962 | * shmem_page_offset = offset within page in shmem file | 
|  | 963 | * data_page_index = page number in get_user_pages return | 
|  | 964 | * data_page_offset = offset with data_page_index page. | 
|  | 965 | * page_length = bytes to copy for this page | 
|  | 966 | */ | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 967 | shmem_page_offset = offset & ~PAGE_MASK; | 
|  | 968 | data_page_index = data_ptr / PAGE_SIZE - first_data_page; | 
|  | 969 | data_page_offset = data_ptr & ~PAGE_MASK; | 
|  | 970 |  | 
|  | 971 | page_length = remain; | 
|  | 972 | if ((shmem_page_offset + page_length) > PAGE_SIZE) | 
|  | 973 | page_length = PAGE_SIZE - shmem_page_offset; | 
|  | 974 | if ((data_page_offset + page_length) > PAGE_SIZE) | 
|  | 975 | page_length = PAGE_SIZE - data_page_offset; | 
|  | 976 |  | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 977 | page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT, | 
|  | 978 | GFP_HIGHUSER | __GFP_RECLAIMABLE); | 
|  | 979 | if (IS_ERR(page)) { | 
|  | 980 | ret = PTR_ERR(page); | 
|  | 981 | goto out; | 
|  | 982 | } | 
|  | 983 |  | 
| Eric Anholt | 280b713 | 2009-03-12 16:56:27 -0700 | [diff] [blame] | 984 | if (do_bit17_swizzling) { | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 985 | slow_shmem_bit17_copy(page, | 
| Eric Anholt | 280b713 | 2009-03-12 16:56:27 -0700 | [diff] [blame] | 986 | shmem_page_offset, | 
|  | 987 | user_pages[data_page_index], | 
|  | 988 | data_page_offset, | 
| Chris Wilson | 99a03df | 2010-05-27 14:15:34 +0100 | [diff] [blame] | 989 | page_length, | 
|  | 990 | 0); | 
|  | 991 | } else { | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 992 | slow_shmem_copy(page, | 
| Chris Wilson | 99a03df | 2010-05-27 14:15:34 +0100 | [diff] [blame] | 993 | shmem_page_offset, | 
|  | 994 | user_pages[data_page_index], | 
|  | 995 | data_page_offset, | 
|  | 996 | page_length); | 
| Eric Anholt | 280b713 | 2009-03-12 16:56:27 -0700 | [diff] [blame] | 997 | } | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 998 |  | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 999 | set_page_dirty(page); | 
|  | 1000 | mark_page_accessed(page); | 
|  | 1001 | page_cache_release(page); | 
|  | 1002 |  | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 1003 | remain -= page_length; | 
|  | 1004 | data_ptr += page_length; | 
|  | 1005 | offset += page_length; | 
|  | 1006 | } | 
|  | 1007 |  | 
| Chris Wilson | fbd5a26 | 2010-10-14 15:03:58 +0100 | [diff] [blame] | 1008 | out: | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 1009 | for (i = 0; i < pinned_pages; i++) | 
|  | 1010 | page_cache_release(user_pages[i]); | 
| Jesse Barnes | 8e7d2b2 | 2009-05-08 16:13:25 -0700 | [diff] [blame] | 1011 | drm_free_large(user_pages); | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 1012 |  | 
|  | 1013 | return ret; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1014 | } | 
|  | 1015 |  | 
|  | 1016 | /** | 
|  | 1017 | * Writes data to the object referenced by handle. | 
|  | 1018 | * | 
|  | 1019 | * On error, the contents of the buffer that were to be modified are undefined. | 
|  | 1020 | */ | 
|  | 1021 | int | 
|  | 1022 | i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | 
| Chris Wilson | fbd5a26 | 2010-10-14 15:03:58 +0100 | [diff] [blame] | 1023 | struct drm_file *file) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1024 | { | 
|  | 1025 | struct drm_i915_gem_pwrite *args = data; | 
|  | 1026 | struct drm_gem_object *obj; | 
|  | 1027 | struct drm_i915_gem_object *obj_priv; | 
|  | 1028 | int ret = 0; | 
|  | 1029 |  | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 1030 | ret = i915_mutex_lock_interruptible(dev); | 
|  | 1031 | if (ret) | 
|  | 1032 | return ret; | 
|  | 1033 |  | 
| Chris Wilson | fbd5a26 | 2010-10-14 15:03:58 +0100 | [diff] [blame] | 1034 | obj = drm_gem_object_lookup(dev, file, args->handle); | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 1035 | if (obj == NULL) { | 
|  | 1036 | ret = -ENOENT; | 
|  | 1037 | goto unlock; | 
|  | 1038 | } | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 1039 | obj_priv = to_intel_bo(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1040 |  | 
| Chris Wilson | fbd5a26 | 2010-10-14 15:03:58 +0100 | [diff] [blame] | 1041 |  | 
| Chris Wilson | 7dcd249 | 2010-09-26 20:21:44 +0100 | [diff] [blame] | 1042 | /* Bounds check destination. */ | 
|  | 1043 | if (args->offset > obj->size || args->size > obj->size - args->offset) { | 
| Chris Wilson | ce9d419 | 2010-09-26 20:50:05 +0100 | [diff] [blame] | 1044 | ret = -EINVAL; | 
| Chris Wilson | 35b62a8 | 2010-09-26 20:23:38 +0100 | [diff] [blame] | 1045 | goto out; | 
| Chris Wilson | ce9d419 | 2010-09-26 20:50:05 +0100 | [diff] [blame] | 1046 | } | 
|  | 1047 |  | 
| Chris Wilson | 35b62a8 | 2010-09-26 20:23:38 +0100 | [diff] [blame] | 1048 | if (args->size == 0) | 
|  | 1049 | goto out; | 
|  | 1050 |  | 
| Chris Wilson | ce9d419 | 2010-09-26 20:50:05 +0100 | [diff] [blame] | 1051 | if (!access_ok(VERIFY_READ, | 
|  | 1052 | (char __user *)(uintptr_t)args->data_ptr, | 
|  | 1053 | args->size)) { | 
|  | 1054 | ret = -EFAULT; | 
| Chris Wilson | 35b62a8 | 2010-09-26 20:23:38 +0100 | [diff] [blame] | 1055 | goto out; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1056 | } | 
|  | 1057 |  | 
| Chris Wilson | b5e4feb | 2010-10-14 13:47:43 +0100 | [diff] [blame] | 1058 | ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr, | 
|  | 1059 | args->size); | 
|  | 1060 | if (ret) { | 
|  | 1061 | ret = -EFAULT; | 
|  | 1062 | goto out; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1063 | } | 
|  | 1064 |  | 
|  | 1065 | /* We can only do the GTT pwrite on untiled buffers, as otherwise | 
|  | 1066 | * it would end up going through the fenced access, and we'll get | 
|  | 1067 | * different detiling behavior between reading and writing. | 
|  | 1068 | * pread/pwrite currently are reading and writing from the CPU | 
|  | 1069 | * perspective, requiring manual detiling by the client. | 
|  | 1070 | */ | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 1071 | if (obj_priv->phys_obj) | 
| Chris Wilson | fbd5a26 | 2010-10-14 15:03:58 +0100 | [diff] [blame] | 1072 | ret = i915_gem_phys_pwrite(dev, obj, args, file); | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 1073 | else if (obj_priv->tiling_mode == I915_TILING_NONE && | 
| Chris Wilson | 5cdf588 | 2010-09-27 15:51:07 +0100 | [diff] [blame] | 1074 | obj_priv->gtt_space && | 
| Chris Wilson | 9b8c4a0 | 2010-05-27 14:21:01 +0100 | [diff] [blame] | 1075 | obj->write_domain != I915_GEM_DOMAIN_CPU) { | 
| Daniel Vetter | 75e9e91 | 2010-11-04 17:11:09 +0100 | [diff] [blame^] | 1076 | ret = i915_gem_object_pin(obj, 0, true); | 
| Chris Wilson | fbd5a26 | 2010-10-14 15:03:58 +0100 | [diff] [blame] | 1077 | if (ret) | 
|  | 1078 | goto out; | 
|  | 1079 |  | 
|  | 1080 | ret = i915_gem_object_set_to_gtt_domain(obj, 1); | 
|  | 1081 | if (ret) | 
|  | 1082 | goto out_unpin; | 
|  | 1083 |  | 
|  | 1084 | ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file); | 
|  | 1085 | if (ret == -EFAULT) | 
|  | 1086 | ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file); | 
|  | 1087 |  | 
|  | 1088 | out_unpin: | 
|  | 1089 | i915_gem_object_unpin(obj); | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 1090 | } else { | 
| Chris Wilson | fbd5a26 | 2010-10-14 15:03:58 +0100 | [diff] [blame] | 1091 | ret = i915_gem_object_set_to_cpu_domain(obj, 1); | 
|  | 1092 | if (ret) | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 1093 | goto out; | 
| Chris Wilson | fbd5a26 | 2010-10-14 15:03:58 +0100 | [diff] [blame] | 1094 |  | 
|  | 1095 | ret = -EFAULT; | 
|  | 1096 | if (!i915_gem_object_needs_bit17_swizzle(obj)) | 
|  | 1097 | ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file); | 
|  | 1098 | if (ret == -EFAULT) | 
|  | 1099 | ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file); | 
| Eric Anholt | 40123c1 | 2009-03-09 13:42:30 -0700 | [diff] [blame] | 1100 | } | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1101 |  | 
| Chris Wilson | 35b62a8 | 2010-09-26 20:23:38 +0100 | [diff] [blame] | 1102 | out: | 
| Chris Wilson | fbd5a26 | 2010-10-14 15:03:58 +0100 | [diff] [blame] | 1103 | drm_gem_object_unreference(obj); | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 1104 | unlock: | 
| Chris Wilson | fbd5a26 | 2010-10-14 15:03:58 +0100 | [diff] [blame] | 1105 | mutex_unlock(&dev->struct_mutex); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1106 | return ret; | 
|  | 1107 | } | 
|  | 1108 |  | 
|  | 1109 | /** | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 1110 | * Called when user space prepares to use an object with the CPU, either | 
|  | 1111 | * through the mmap ioctl's mapping or a GTT mapping. | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1112 | */ | 
|  | 1113 | int | 
|  | 1114 | i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | 
|  | 1115 | struct drm_file *file_priv) | 
|  | 1116 | { | 
| Eric Anholt | a09ba7f | 2009-08-29 12:49:51 -0700 | [diff] [blame] | 1117 | struct drm_i915_private *dev_priv = dev->dev_private; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1118 | struct drm_i915_gem_set_domain *args = data; | 
|  | 1119 | struct drm_gem_object *obj; | 
| Jesse Barnes | 652c393 | 2009-08-17 13:31:43 -0700 | [diff] [blame] | 1120 | struct drm_i915_gem_object *obj_priv; | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 1121 | uint32_t read_domains = args->read_domains; | 
|  | 1122 | uint32_t write_domain = args->write_domain; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1123 | int ret; | 
|  | 1124 |  | 
|  | 1125 | if (!(dev->driver->driver_features & DRIVER_GEM)) | 
|  | 1126 | return -ENODEV; | 
|  | 1127 |  | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 1128 | /* Only handle setting domains to types used by the CPU. */ | 
| Chris Wilson | 21d509e | 2009-06-06 09:46:02 +0100 | [diff] [blame] | 1129 | if (write_domain & I915_GEM_GPU_DOMAINS) | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 1130 | return -EINVAL; | 
|  | 1131 |  | 
| Chris Wilson | 21d509e | 2009-06-06 09:46:02 +0100 | [diff] [blame] | 1132 | if (read_domains & I915_GEM_GPU_DOMAINS) | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 1133 | return -EINVAL; | 
|  | 1134 |  | 
|  | 1135 | /* Having something in the write domain implies it's in the read | 
|  | 1136 | * domain, and only that read domain.  Enforce that in the request. | 
|  | 1137 | */ | 
|  | 1138 | if (write_domain != 0 && read_domains != write_domain) | 
|  | 1139 | return -EINVAL; | 
|  | 1140 |  | 
| Chris Wilson | 76c1dec | 2010-09-25 11:22:51 +0100 | [diff] [blame] | 1141 | ret = i915_mutex_lock_interruptible(dev); | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 1142 | if (ret) | 
| Chris Wilson | 76c1dec | 2010-09-25 11:22:51 +0100 | [diff] [blame] | 1143 | return ret; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1144 |  | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 1145 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 
|  | 1146 | if (obj == NULL) { | 
|  | 1147 | ret = -ENOENT; | 
|  | 1148 | goto unlock; | 
| Chris Wilson | 76c1dec | 2010-09-25 11:22:51 +0100 | [diff] [blame] | 1149 | } | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 1150 | obj_priv = to_intel_bo(obj); | 
| Jesse Barnes | 652c393 | 2009-08-17 13:31:43 -0700 | [diff] [blame] | 1151 |  | 
|  | 1152 | intel_mark_busy(dev, obj); | 
|  | 1153 |  | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 1154 | if (read_domains & I915_GEM_DOMAIN_GTT) { | 
|  | 1155 | ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); | 
| Eric Anholt | 0235439 | 2008-11-26 13:58:13 -0800 | [diff] [blame] | 1156 |  | 
| Eric Anholt | a09ba7f | 2009-08-29 12:49:51 -0700 | [diff] [blame] | 1157 | /* Update the LRU on the fence for the CPU access that's | 
|  | 1158 | * about to occur. | 
|  | 1159 | */ | 
|  | 1160 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { | 
| Daniel Vetter | 007cc8a | 2010-04-28 11:02:31 +0200 | [diff] [blame] | 1161 | struct drm_i915_fence_reg *reg = | 
|  | 1162 | &dev_priv->fence_regs[obj_priv->fence_reg]; | 
|  | 1163 | list_move_tail(®->lru_list, | 
| Eric Anholt | a09ba7f | 2009-08-29 12:49:51 -0700 | [diff] [blame] | 1164 | &dev_priv->mm.fence_list); | 
|  | 1165 | } | 
|  | 1166 |  | 
| Eric Anholt | 0235439 | 2008-11-26 13:58:13 -0800 | [diff] [blame] | 1167 | /* Silently promote "you're not bound, there was nothing to do" | 
|  | 1168 | * to success, since the client was just asking us to | 
|  | 1169 | * make sure everything was done. | 
|  | 1170 | */ | 
|  | 1171 | if (ret == -EINVAL) | 
|  | 1172 | ret = 0; | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 1173 | } else { | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 1174 | ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 1175 | } | 
|  | 1176 |  | 
| Chris Wilson | 7d1c480 | 2010-08-07 21:45:03 +0100 | [diff] [blame] | 1177 | /* Maintain LRU order of "inactive" objects */ | 
|  | 1178 | if (ret == 0 && i915_gem_object_is_inactive(obj_priv)) | 
| Chris Wilson | 69dc498 | 2010-10-19 10:36:51 +0100 | [diff] [blame] | 1179 | list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); | 
| Chris Wilson | 7d1c480 | 2010-08-07 21:45:03 +0100 | [diff] [blame] | 1180 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1181 | drm_gem_object_unreference(obj); | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 1182 | unlock: | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1183 | mutex_unlock(&dev->struct_mutex); | 
|  | 1184 | return ret; | 
|  | 1185 | } | 
|  | 1186 |  | 
|  | 1187 | /** | 
|  | 1188 | * Called when user space has done writes to this buffer | 
|  | 1189 | */ | 
|  | 1190 | int | 
|  | 1191 | i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, | 
|  | 1192 | struct drm_file *file_priv) | 
|  | 1193 | { | 
|  | 1194 | struct drm_i915_gem_sw_finish *args = data; | 
|  | 1195 | struct drm_gem_object *obj; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1196 | int ret = 0; | 
|  | 1197 |  | 
|  | 1198 | if (!(dev->driver->driver_features & DRIVER_GEM)) | 
|  | 1199 | return -ENODEV; | 
|  | 1200 |  | 
| Chris Wilson | 76c1dec | 2010-09-25 11:22:51 +0100 | [diff] [blame] | 1201 | ret = i915_mutex_lock_interruptible(dev); | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 1202 | if (ret) | 
| Chris Wilson | 76c1dec | 2010-09-25 11:22:51 +0100 | [diff] [blame] | 1203 | return ret; | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 1204 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1205 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 
|  | 1206 | if (obj == NULL) { | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 1207 | ret = -ENOENT; | 
|  | 1208 | goto unlock; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1209 | } | 
|  | 1210 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1211 | /* Pinned buffers may be scanout, so flush the cache */ | 
| Chris Wilson | 3d2a812 | 2010-09-29 11:39:53 +0100 | [diff] [blame] | 1212 | if (to_intel_bo(obj)->pin_count) | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 1213 | i915_gem_object_flush_cpu_write_domain(obj); | 
|  | 1214 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1215 | drm_gem_object_unreference(obj); | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 1216 | unlock: | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1217 | mutex_unlock(&dev->struct_mutex); | 
|  | 1218 | return ret; | 
|  | 1219 | } | 
|  | 1220 |  | 
|  | 1221 | /** | 
|  | 1222 | * Maps the contents of an object, returning the address it is mapped | 
|  | 1223 | * into. | 
|  | 1224 | * | 
|  | 1225 | * While the mapping holds a reference on the contents of the object, it doesn't | 
|  | 1226 | * imply a ref on the object itself. | 
|  | 1227 | */ | 
|  | 1228 | int | 
|  | 1229 | i915_gem_mmap_ioctl(struct drm_device *dev, void *data, | 
|  | 1230 | struct drm_file *file_priv) | 
|  | 1231 | { | 
| Chris Wilson | da761a6 | 2010-10-27 17:37:08 +0100 | [diff] [blame] | 1232 | struct drm_i915_private *dev_priv = dev->dev_private; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1233 | struct drm_i915_gem_mmap *args = data; | 
|  | 1234 | struct drm_gem_object *obj; | 
|  | 1235 | loff_t offset; | 
|  | 1236 | unsigned long addr; | 
|  | 1237 |  | 
|  | 1238 | if (!(dev->driver->driver_features & DRIVER_GEM)) | 
|  | 1239 | return -ENODEV; | 
|  | 1240 |  | 
|  | 1241 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 
|  | 1242 | if (obj == NULL) | 
| Chris Wilson | bf79cb9 | 2010-08-04 14:19:46 +0100 | [diff] [blame] | 1243 | return -ENOENT; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1244 |  | 
| Chris Wilson | da761a6 | 2010-10-27 17:37:08 +0100 | [diff] [blame] | 1245 | if (obj->size > dev_priv->mm.gtt_mappable_end) { | 
|  | 1246 | drm_gem_object_unreference_unlocked(obj); | 
|  | 1247 | return -E2BIG; | 
|  | 1248 | } | 
|  | 1249 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1250 | offset = args->offset; | 
|  | 1251 |  | 
|  | 1252 | down_write(¤t->mm->mmap_sem); | 
|  | 1253 | addr = do_mmap(obj->filp, 0, args->size, | 
|  | 1254 | PROT_READ | PROT_WRITE, MAP_SHARED, | 
|  | 1255 | args->offset); | 
|  | 1256 | up_write(¤t->mm->mmap_sem); | 
| Luca Barbieri | bc9025b | 2010-02-09 05:49:12 +0000 | [diff] [blame] | 1257 | drm_gem_object_unreference_unlocked(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1258 | if (IS_ERR((void *)addr)) | 
|  | 1259 | return addr; | 
|  | 1260 |  | 
|  | 1261 | args->addr_ptr = (uint64_t) addr; | 
|  | 1262 |  | 
|  | 1263 | return 0; | 
|  | 1264 | } | 
|  | 1265 |  | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1266 | /** | 
|  | 1267 | * i915_gem_fault - fault a page into the GTT | 
|  | 1268 | * vma: VMA in question | 
|  | 1269 | * vmf: fault info | 
|  | 1270 | * | 
|  | 1271 | * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped | 
|  | 1272 | * from userspace.  The fault handler takes care of binding the object to | 
|  | 1273 | * the GTT (if needed), allocating and programming a fence register (again, | 
|  | 1274 | * only if needed based on whether the old reg is still valid or the object | 
|  | 1275 | * is tiled) and inserting a new PTE into the faulting process. | 
|  | 1276 | * | 
|  | 1277 | * Note that the faulting process may involve evicting existing objects | 
|  | 1278 | * from the GTT and/or fence registers to make room.  So performance may | 
|  | 1279 | * suffer if the GTT working set is large or there are few fence registers | 
|  | 1280 | * left. | 
|  | 1281 | */ | 
|  | 1282 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 
|  | 1283 | { | 
|  | 1284 | struct drm_gem_object *obj = vma->vm_private_data; | 
|  | 1285 | struct drm_device *dev = obj->dev; | 
| Chris Wilson | 7d1c480 | 2010-08-07 21:45:03 +0100 | [diff] [blame] | 1286 | drm_i915_private_t *dev_priv = dev->dev_private; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 1287 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1288 | pgoff_t page_offset; | 
|  | 1289 | unsigned long pfn; | 
|  | 1290 | int ret = 0; | 
| Jesse Barnes | 0f973f2 | 2009-01-26 17:10:45 -0800 | [diff] [blame] | 1291 | bool write = !!(vmf->flags & FAULT_FLAG_WRITE); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1292 |  | 
|  | 1293 | /* We don't use vmf->pgoff since that has the fake offset */ | 
|  | 1294 | page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> | 
|  | 1295 | PAGE_SHIFT; | 
|  | 1296 |  | 
|  | 1297 | /* Now bind it into the GTT if needed */ | 
|  | 1298 | mutex_lock(&dev->struct_mutex); | 
| Daniel Vetter | fb7d516 | 2010-10-01 22:05:20 +0200 | [diff] [blame] | 1299 | BUG_ON(obj_priv->pin_count && !obj_priv->pin_mappable); | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 1300 |  | 
|  | 1301 | if (obj_priv->gtt_space) { | 
| Daniel Vetter | 75e9e91 | 2010-11-04 17:11:09 +0100 | [diff] [blame^] | 1302 | if (!obj_priv->map_and_fenceable) { | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 1303 | ret = i915_gem_object_unbind(obj); | 
|  | 1304 | if (ret) | 
|  | 1305 | goto unlock; | 
|  | 1306 | } | 
|  | 1307 | } | 
| Daniel Vetter | 16e809a | 2010-09-16 19:37:04 +0200 | [diff] [blame] | 1308 |  | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1309 | if (!obj_priv->gtt_space) { | 
| Daniel Vetter | 75e9e91 | 2010-11-04 17:11:09 +0100 | [diff] [blame^] | 1310 | ret = i915_gem_object_bind_to_gtt(obj, 0, true); | 
| Chris Wilson | c715089 | 2009-09-23 00:43:56 +0100 | [diff] [blame] | 1311 | if (ret) | 
|  | 1312 | goto unlock; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1313 | } | 
|  | 1314 |  | 
| Chris Wilson | 4a684a4 | 2010-10-28 14:44:08 +0100 | [diff] [blame] | 1315 | ret = i915_gem_object_set_to_gtt_domain(obj, write); | 
|  | 1316 | if (ret) | 
|  | 1317 | goto unlock; | 
|  | 1318 |  | 
| Daniel Vetter | fb7d516 | 2010-10-01 22:05:20 +0200 | [diff] [blame] | 1319 | if (!obj_priv->fault_mappable) { | 
|  | 1320 | obj_priv->fault_mappable = true; | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 1321 | i915_gem_info_update_mappable(dev_priv, obj_priv, true); | 
| Daniel Vetter | fb7d516 | 2010-10-01 22:05:20 +0200 | [diff] [blame] | 1322 | } | 
|  | 1323 |  | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1324 | /* Need a new fence register? */ | 
| Eric Anholt | a09ba7f | 2009-08-29 12:49:51 -0700 | [diff] [blame] | 1325 | if (obj_priv->tiling_mode != I915_TILING_NONE) { | 
| Chris Wilson | 2cf34d7 | 2010-09-14 13:03:28 +0100 | [diff] [blame] | 1326 | ret = i915_gem_object_get_fence_reg(obj, true); | 
| Chris Wilson | c715089 | 2009-09-23 00:43:56 +0100 | [diff] [blame] | 1327 | if (ret) | 
|  | 1328 | goto unlock; | 
| Eric Anholt | d9ddcb9 | 2009-01-27 10:33:49 -0800 | [diff] [blame] | 1329 | } | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1330 |  | 
| Chris Wilson | 7d1c480 | 2010-08-07 21:45:03 +0100 | [diff] [blame] | 1331 | if (i915_gem_object_is_inactive(obj_priv)) | 
| Chris Wilson | 69dc498 | 2010-10-19 10:36:51 +0100 | [diff] [blame] | 1332 | list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); | 
| Chris Wilson | 7d1c480 | 2010-08-07 21:45:03 +0100 | [diff] [blame] | 1333 |  | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1334 | pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) + | 
|  | 1335 | page_offset; | 
|  | 1336 |  | 
|  | 1337 | /* Finally, remap it using the new GTT offset */ | 
|  | 1338 | ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); | 
| Chris Wilson | c715089 | 2009-09-23 00:43:56 +0100 | [diff] [blame] | 1339 | unlock: | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1340 | mutex_unlock(&dev->struct_mutex); | 
|  | 1341 |  | 
|  | 1342 | switch (ret) { | 
| Chris Wilson | c715089 | 2009-09-23 00:43:56 +0100 | [diff] [blame] | 1343 | case 0: | 
|  | 1344 | case -ERESTARTSYS: | 
|  | 1345 | return VM_FAULT_NOPAGE; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1346 | case -ENOMEM: | 
|  | 1347 | case -EAGAIN: | 
|  | 1348 | return VM_FAULT_OOM; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1349 | default: | 
| Chris Wilson | c715089 | 2009-09-23 00:43:56 +0100 | [diff] [blame] | 1350 | return VM_FAULT_SIGBUS; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1351 | } | 
|  | 1352 | } | 
|  | 1353 |  | 
|  | 1354 | /** | 
|  | 1355 | * i915_gem_create_mmap_offset - create a fake mmap offset for an object | 
|  | 1356 | * @obj: obj in question | 
|  | 1357 | * | 
|  | 1358 | * GEM memory mapping works by handing back to userspace a fake mmap offset | 
|  | 1359 | * it can use in a subsequent mmap(2) call.  The DRM core code then looks | 
|  | 1360 | * up the object based on the offset and sets up the various memory mapping | 
|  | 1361 | * structures. | 
|  | 1362 | * | 
|  | 1363 | * This routine allocates and attaches a fake offset for @obj. | 
|  | 1364 | */ | 
|  | 1365 | static int | 
|  | 1366 | i915_gem_create_mmap_offset(struct drm_gem_object *obj) | 
|  | 1367 | { | 
|  | 1368 | struct drm_device *dev = obj->dev; | 
|  | 1369 | struct drm_gem_mm *mm = dev->mm_private; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1370 | struct drm_map_list *list; | 
| Benjamin Herrenschmidt | f77d390 | 2009-02-02 16:55:46 +1100 | [diff] [blame] | 1371 | struct drm_local_map *map; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1372 | int ret = 0; | 
|  | 1373 |  | 
|  | 1374 | /* Set the object up for mmap'ing */ | 
|  | 1375 | list = &obj->map_list; | 
| Eric Anholt | 9a298b2 | 2009-03-24 12:23:04 -0700 | [diff] [blame] | 1376 | list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1377 | if (!list->map) | 
|  | 1378 | return -ENOMEM; | 
|  | 1379 |  | 
|  | 1380 | map = list->map; | 
|  | 1381 | map->type = _DRM_GEM; | 
|  | 1382 | map->size = obj->size; | 
|  | 1383 | map->handle = obj; | 
|  | 1384 |  | 
|  | 1385 | /* Get a DRM GEM mmap offset allocated... */ | 
|  | 1386 | list->file_offset_node = drm_mm_search_free(&mm->offset_manager, | 
|  | 1387 | obj->size / PAGE_SIZE, 0, 0); | 
|  | 1388 | if (!list->file_offset_node) { | 
|  | 1389 | DRM_ERROR("failed to allocate offset for bo %d\n", obj->name); | 
| Chris Wilson | 9e0ae534 | 2010-09-21 15:05:24 +0100 | [diff] [blame] | 1390 | ret = -ENOSPC; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1391 | goto out_free_list; | 
|  | 1392 | } | 
|  | 1393 |  | 
|  | 1394 | list->file_offset_node = drm_mm_get_block(list->file_offset_node, | 
|  | 1395 | obj->size / PAGE_SIZE, 0); | 
|  | 1396 | if (!list->file_offset_node) { | 
|  | 1397 | ret = -ENOMEM; | 
|  | 1398 | goto out_free_list; | 
|  | 1399 | } | 
|  | 1400 |  | 
|  | 1401 | list->hash.key = list->file_offset_node->start; | 
| Chris Wilson | 9e0ae534 | 2010-09-21 15:05:24 +0100 | [diff] [blame] | 1402 | ret = drm_ht_insert_item(&mm->offset_hash, &list->hash); | 
|  | 1403 | if (ret) { | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1404 | DRM_ERROR("failed to add to map hash\n"); | 
|  | 1405 | goto out_free_mm; | 
|  | 1406 | } | 
|  | 1407 |  | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1408 | return 0; | 
|  | 1409 |  | 
|  | 1410 | out_free_mm: | 
|  | 1411 | drm_mm_put_block(list->file_offset_node); | 
|  | 1412 | out_free_list: | 
| Eric Anholt | 9a298b2 | 2009-03-24 12:23:04 -0700 | [diff] [blame] | 1413 | kfree(list->map); | 
| Chris Wilson | 39a01d1 | 2010-10-28 13:03:06 +0100 | [diff] [blame] | 1414 | list->map = NULL; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1415 |  | 
|  | 1416 | return ret; | 
|  | 1417 | } | 
|  | 1418 |  | 
| Chris Wilson | 901782b | 2009-07-10 08:18:50 +0100 | [diff] [blame] | 1419 | /** | 
|  | 1420 | * i915_gem_release_mmap - remove physical page mappings | 
|  | 1421 | * @obj: obj in question | 
|  | 1422 | * | 
| André Goddard Rosa | af901ca | 2009-11-14 13:09:05 -0200 | [diff] [blame] | 1423 | * Preserve the reservation of the mmapping with the DRM core code, but | 
| Chris Wilson | 901782b | 2009-07-10 08:18:50 +0100 | [diff] [blame] | 1424 | * relinquish ownership of the pages back to the system. | 
|  | 1425 | * | 
|  | 1426 | * It is vital that we remove the page mapping if we have mapped a tiled | 
|  | 1427 | * object through the GTT and then lose the fence register due to | 
|  | 1428 | * resource pressure. Similarly if the object has been moved out of the | 
|  | 1429 | * aperture, than pages mapped into userspace must be revoked. Removing the | 
|  | 1430 | * mapping will then trigger a page fault on the next user access, allowing | 
|  | 1431 | * fixup by i915_gem_fault(). | 
|  | 1432 | */ | 
| Eric Anholt | d05ca30 | 2009-07-10 13:02:26 -0700 | [diff] [blame] | 1433 | void | 
| Chris Wilson | 901782b | 2009-07-10 08:18:50 +0100 | [diff] [blame] | 1434 | i915_gem_release_mmap(struct drm_gem_object *obj) | 
|  | 1435 | { | 
|  | 1436 | struct drm_device *dev = obj->dev; | 
| Daniel Vetter | fb7d516 | 2010-10-01 22:05:20 +0200 | [diff] [blame] | 1437 | struct drm_i915_private *dev_priv = dev->dev_private; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 1438 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Chris Wilson | 901782b | 2009-07-10 08:18:50 +0100 | [diff] [blame] | 1439 |  | 
| Chris Wilson | 39a01d1 | 2010-10-28 13:03:06 +0100 | [diff] [blame] | 1440 | if (unlikely(obj->map_list.map && dev->dev_mapping)) | 
| Chris Wilson | 901782b | 2009-07-10 08:18:50 +0100 | [diff] [blame] | 1441 | unmap_mapping_range(dev->dev_mapping, | 
| Chris Wilson | 39a01d1 | 2010-10-28 13:03:06 +0100 | [diff] [blame] | 1442 | (loff_t)obj->map_list.hash.key<<PAGE_SHIFT, | 
|  | 1443 | obj->size, 1); | 
| Daniel Vetter | fb7d516 | 2010-10-01 22:05:20 +0200 | [diff] [blame] | 1444 |  | 
|  | 1445 | if (obj_priv->fault_mappable) { | 
|  | 1446 | obj_priv->fault_mappable = false; | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 1447 | i915_gem_info_update_mappable(dev_priv, obj_priv, false); | 
| Daniel Vetter | fb7d516 | 2010-10-01 22:05:20 +0200 | [diff] [blame] | 1448 | } | 
| Chris Wilson | 901782b | 2009-07-10 08:18:50 +0100 | [diff] [blame] | 1449 | } | 
|  | 1450 |  | 
| Jesse Barnes | ab00b3e | 2009-02-11 14:01:46 -0800 | [diff] [blame] | 1451 | static void | 
|  | 1452 | i915_gem_free_mmap_offset(struct drm_gem_object *obj) | 
|  | 1453 | { | 
|  | 1454 | struct drm_device *dev = obj->dev; | 
| Jesse Barnes | ab00b3e | 2009-02-11 14:01:46 -0800 | [diff] [blame] | 1455 | struct drm_gem_mm *mm = dev->mm_private; | 
| Chris Wilson | 39a01d1 | 2010-10-28 13:03:06 +0100 | [diff] [blame] | 1456 | struct drm_map_list *list = &obj->map_list; | 
| Jesse Barnes | ab00b3e | 2009-02-11 14:01:46 -0800 | [diff] [blame] | 1457 |  | 
| Jesse Barnes | ab00b3e | 2009-02-11 14:01:46 -0800 | [diff] [blame] | 1458 | drm_ht_remove_item(&mm->offset_hash, &list->hash); | 
| Chris Wilson | 39a01d1 | 2010-10-28 13:03:06 +0100 | [diff] [blame] | 1459 | drm_mm_put_block(list->file_offset_node); | 
|  | 1460 | kfree(list->map); | 
|  | 1461 | list->map = NULL; | 
| Jesse Barnes | ab00b3e | 2009-02-11 14:01:46 -0800 | [diff] [blame] | 1462 | } | 
|  | 1463 |  | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1464 | /** | 
|  | 1465 | * i915_gem_get_gtt_alignment - return required GTT alignment for an object | 
|  | 1466 | * @obj: object to check | 
|  | 1467 | * | 
|  | 1468 | * Return the required GTT alignment for an object, taking into account | 
|  | 1469 | * potential fence register mapping if needed. | 
|  | 1470 | */ | 
|  | 1471 | static uint32_t | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 1472 | i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj_priv) | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1473 | { | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 1474 | struct drm_device *dev = obj_priv->base.dev; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1475 |  | 
|  | 1476 | /* | 
|  | 1477 | * Minimum alignment is 4k (GTT page size), but might be greater | 
|  | 1478 | * if a fence register is needed for the object. | 
|  | 1479 | */ | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 1480 | if (INTEL_INFO(dev)->gen >= 4 || | 
|  | 1481 | obj_priv->tiling_mode == I915_TILING_NONE) | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1482 | return 4096; | 
|  | 1483 |  | 
|  | 1484 | /* | 
|  | 1485 | * Previous chips need to be aligned to the size of the smallest | 
|  | 1486 | * fence register that can contain the object. | 
|  | 1487 | */ | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 1488 | return i915_gem_get_gtt_size(obj_priv); | 
|  | 1489 | } | 
|  | 1490 |  | 
|  | 1491 | static uint32_t | 
|  | 1492 | i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv) | 
|  | 1493 | { | 
|  | 1494 | struct drm_device *dev = obj_priv->base.dev; | 
|  | 1495 | uint32_t size; | 
|  | 1496 |  | 
|  | 1497 | /* | 
|  | 1498 | * Minimum alignment is 4k (GTT page size), but might be greater | 
|  | 1499 | * if a fence register is needed for the object. | 
|  | 1500 | */ | 
|  | 1501 | if (INTEL_INFO(dev)->gen >= 4) | 
|  | 1502 | return obj_priv->base.size; | 
|  | 1503 |  | 
|  | 1504 | /* | 
|  | 1505 | * Previous chips need to be aligned to the size of the smallest | 
|  | 1506 | * fence register that can contain the object. | 
|  | 1507 | */ | 
| Chris Wilson | a6c45cf | 2010-09-17 00:32:17 +0100 | [diff] [blame] | 1508 | if (INTEL_INFO(dev)->gen == 3) | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 1509 | size = 1024*1024; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1510 | else | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 1511 | size = 512*1024; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1512 |  | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 1513 | while (size < obj_priv->base.size) | 
|  | 1514 | size <<= 1; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1515 |  | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 1516 | return size; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1517 | } | 
|  | 1518 |  | 
|  | 1519 | /** | 
|  | 1520 | * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing | 
|  | 1521 | * @dev: DRM device | 
|  | 1522 | * @data: GTT mapping ioctl data | 
|  | 1523 | * @file_priv: GEM object info | 
|  | 1524 | * | 
|  | 1525 | * Simply returns the fake offset to userspace so it can mmap it. | 
|  | 1526 | * The mmap call will end up in drm_gem_mmap(), which will set things | 
|  | 1527 | * up so we can get faults in the handler above. | 
|  | 1528 | * | 
|  | 1529 | * The fault handler will take care of binding the object into the GTT | 
|  | 1530 | * (since it may have been evicted to make room for something), allocating | 
|  | 1531 | * a fence register, and mapping the appropriate aperture address into | 
|  | 1532 | * userspace. | 
|  | 1533 | */ | 
|  | 1534 | int | 
|  | 1535 | i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | 
|  | 1536 | struct drm_file *file_priv) | 
|  | 1537 | { | 
| Chris Wilson | da761a6 | 2010-10-27 17:37:08 +0100 | [diff] [blame] | 1538 | struct drm_i915_private *dev_priv = dev->dev_private; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1539 | struct drm_i915_gem_mmap_gtt *args = data; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1540 | struct drm_gem_object *obj; | 
|  | 1541 | struct drm_i915_gem_object *obj_priv; | 
|  | 1542 | int ret; | 
|  | 1543 |  | 
|  | 1544 | if (!(dev->driver->driver_features & DRIVER_GEM)) | 
|  | 1545 | return -ENODEV; | 
|  | 1546 |  | 
| Chris Wilson | 76c1dec | 2010-09-25 11:22:51 +0100 | [diff] [blame] | 1547 | ret = i915_mutex_lock_interruptible(dev); | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 1548 | if (ret) | 
| Chris Wilson | 76c1dec | 2010-09-25 11:22:51 +0100 | [diff] [blame] | 1549 | return ret; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1550 |  | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1551 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 1552 | if (obj == NULL) { | 
|  | 1553 | ret = -ENOENT; | 
|  | 1554 | goto unlock; | 
|  | 1555 | } | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 1556 | obj_priv = to_intel_bo(obj); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1557 |  | 
| Chris Wilson | da761a6 | 2010-10-27 17:37:08 +0100 | [diff] [blame] | 1558 | if (obj->size > dev_priv->mm.gtt_mappable_end) { | 
|  | 1559 | ret = -E2BIG; | 
|  | 1560 | goto unlock; | 
|  | 1561 | } | 
|  | 1562 |  | 
| Chris Wilson | ab18282 | 2009-09-22 18:46:17 +0100 | [diff] [blame] | 1563 | if (obj_priv->madv != I915_MADV_WILLNEED) { | 
|  | 1564 | DRM_ERROR("Attempting to mmap a purgeable buffer\n"); | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 1565 | ret = -EINVAL; | 
|  | 1566 | goto out; | 
| Chris Wilson | ab18282 | 2009-09-22 18:46:17 +0100 | [diff] [blame] | 1567 | } | 
|  | 1568 |  | 
| Chris Wilson | 39a01d1 | 2010-10-28 13:03:06 +0100 | [diff] [blame] | 1569 | if (!obj->map_list.map) { | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1570 | ret = i915_gem_create_mmap_offset(obj); | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 1571 | if (ret) | 
|  | 1572 | goto out; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1573 | } | 
|  | 1574 |  | 
| Chris Wilson | 39a01d1 | 2010-10-28 13:03:06 +0100 | [diff] [blame] | 1575 | args->offset = (u64)obj->map_list.hash.key << PAGE_SHIFT; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1576 |  | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 1577 | out: | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1578 | drm_gem_object_unreference(obj); | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 1579 | unlock: | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1580 | mutex_unlock(&dev->struct_mutex); | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 1581 | return ret; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1582 | } | 
|  | 1583 |  | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 1584 | static int | 
|  | 1585 | i915_gem_object_get_pages_gtt(struct drm_gem_object *obj, | 
|  | 1586 | gfp_t gfpmask) | 
|  | 1587 | { | 
|  | 1588 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
|  | 1589 | int page_count, i; | 
|  | 1590 | struct address_space *mapping; | 
|  | 1591 | struct inode *inode; | 
|  | 1592 | struct page *page; | 
|  | 1593 |  | 
|  | 1594 | /* Get the list of pages out of our struct file.  They'll be pinned | 
|  | 1595 | * at this point until we release them. | 
|  | 1596 | */ | 
|  | 1597 | page_count = obj->size / PAGE_SIZE; | 
|  | 1598 | BUG_ON(obj_priv->pages != NULL); | 
|  | 1599 | obj_priv->pages = drm_malloc_ab(page_count, sizeof(struct page *)); | 
|  | 1600 | if (obj_priv->pages == NULL) | 
|  | 1601 | return -ENOMEM; | 
|  | 1602 |  | 
|  | 1603 | inode = obj->filp->f_path.dentry->d_inode; | 
|  | 1604 | mapping = inode->i_mapping; | 
|  | 1605 | for (i = 0; i < page_count; i++) { | 
|  | 1606 | page = read_cache_page_gfp(mapping, i, | 
|  | 1607 | GFP_HIGHUSER | | 
|  | 1608 | __GFP_COLD | | 
|  | 1609 | __GFP_RECLAIMABLE | | 
|  | 1610 | gfpmask); | 
|  | 1611 | if (IS_ERR(page)) | 
|  | 1612 | goto err_pages; | 
|  | 1613 |  | 
|  | 1614 | obj_priv->pages[i] = page; | 
|  | 1615 | } | 
|  | 1616 |  | 
|  | 1617 | if (obj_priv->tiling_mode != I915_TILING_NONE) | 
|  | 1618 | i915_gem_object_do_bit_17_swizzle(obj); | 
|  | 1619 |  | 
|  | 1620 | return 0; | 
|  | 1621 |  | 
|  | 1622 | err_pages: | 
|  | 1623 | while (i--) | 
|  | 1624 | page_cache_release(obj_priv->pages[i]); | 
|  | 1625 |  | 
|  | 1626 | drm_free_large(obj_priv->pages); | 
|  | 1627 | obj_priv->pages = NULL; | 
|  | 1628 | return PTR_ERR(page); | 
|  | 1629 | } | 
|  | 1630 |  | 
| Chris Wilson | 5cdf588 | 2010-09-27 15:51:07 +0100 | [diff] [blame] | 1631 | static void | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 1632 | i915_gem_object_put_pages_gtt(struct drm_gem_object *obj) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1633 | { | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 1634 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1635 | int page_count = obj->size / PAGE_SIZE; | 
|  | 1636 | int i; | 
|  | 1637 |  | 
| Chris Wilson | bb6baf7 | 2009-09-22 14:24:13 +0100 | [diff] [blame] | 1638 | BUG_ON(obj_priv->madv == __I915_MADV_PURGED); | 
| Eric Anholt | 856fa19 | 2009-03-19 14:10:50 -0700 | [diff] [blame] | 1639 |  | 
| Eric Anholt | 280b713 | 2009-03-12 16:56:27 -0700 | [diff] [blame] | 1640 | if (obj_priv->tiling_mode != I915_TILING_NONE) | 
|  | 1641 | i915_gem_object_save_bit_17_swizzle(obj); | 
|  | 1642 |  | 
| Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 1643 | if (obj_priv->madv == I915_MADV_DONTNEED) | 
| Chris Wilson | 13a05fd | 2009-09-20 23:03:19 +0100 | [diff] [blame] | 1644 | obj_priv->dirty = 0; | 
| Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 1645 |  | 
|  | 1646 | for (i = 0; i < page_count; i++) { | 
| Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 1647 | if (obj_priv->dirty) | 
|  | 1648 | set_page_dirty(obj_priv->pages[i]); | 
|  | 1649 |  | 
|  | 1650 | if (obj_priv->madv == I915_MADV_WILLNEED) | 
| Eric Anholt | 856fa19 | 2009-03-19 14:10:50 -0700 | [diff] [blame] | 1651 | mark_page_accessed(obj_priv->pages[i]); | 
| Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 1652 |  | 
|  | 1653 | page_cache_release(obj_priv->pages[i]); | 
|  | 1654 | } | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1655 | obj_priv->dirty = 0; | 
|  | 1656 |  | 
| Jesse Barnes | 8e7d2b2 | 2009-05-08 16:13:25 -0700 | [diff] [blame] | 1657 | drm_free_large(obj_priv->pages); | 
| Eric Anholt | 856fa19 | 2009-03-19 14:10:50 -0700 | [diff] [blame] | 1658 | obj_priv->pages = NULL; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1659 | } | 
|  | 1660 |  | 
| Chris Wilson | a56ba56 | 2010-09-28 10:07:56 +0100 | [diff] [blame] | 1661 | static uint32_t | 
|  | 1662 | i915_gem_next_request_seqno(struct drm_device *dev, | 
|  | 1663 | struct intel_ring_buffer *ring) | 
|  | 1664 | { | 
|  | 1665 | drm_i915_private_t *dev_priv = dev->dev_private; | 
|  | 1666 |  | 
|  | 1667 | ring->outstanding_lazy_request = true; | 
|  | 1668 | return dev_priv->next_seqno; | 
|  | 1669 | } | 
|  | 1670 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1671 | static void | 
| Daniel Vetter | 617dbe2 | 2010-02-11 22:16:02 +0100 | [diff] [blame] | 1672 | i915_gem_object_move_to_active(struct drm_gem_object *obj, | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1673 | struct intel_ring_buffer *ring) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1674 | { | 
|  | 1675 | struct drm_device *dev = obj->dev; | 
| Chris Wilson | 69dc498 | 2010-10-19 10:36:51 +0100 | [diff] [blame] | 1676 | struct drm_i915_private *dev_priv = dev->dev_private; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 1677 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Chris Wilson | a56ba56 | 2010-09-28 10:07:56 +0100 | [diff] [blame] | 1678 | uint32_t seqno = i915_gem_next_request_seqno(dev, ring); | 
| Daniel Vetter | 617dbe2 | 2010-02-11 22:16:02 +0100 | [diff] [blame] | 1679 |  | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1680 | BUG_ON(ring == NULL); | 
|  | 1681 | obj_priv->ring = ring; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1682 |  | 
|  | 1683 | /* Add a reference if we're newly entering the active list. */ | 
|  | 1684 | if (!obj_priv->active) { | 
|  | 1685 | drm_gem_object_reference(obj); | 
|  | 1686 | obj_priv->active = 1; | 
|  | 1687 | } | 
| Daniel Vetter | e35a41d | 2010-02-11 22:13:59 +0100 | [diff] [blame] | 1688 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1689 | /* Move from whatever list we were on to the tail of execution. */ | 
| Chris Wilson | 69dc498 | 2010-10-19 10:36:51 +0100 | [diff] [blame] | 1690 | list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list); | 
|  | 1691 | list_move_tail(&obj_priv->ring_list, &ring->active_list); | 
| Eric Anholt | ce44b0e | 2008-11-06 16:00:31 -0800 | [diff] [blame] | 1692 | obj_priv->last_rendering_seqno = seqno; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1693 | } | 
|  | 1694 |  | 
| Eric Anholt | ce44b0e | 2008-11-06 16:00:31 -0800 | [diff] [blame] | 1695 | static void | 
|  | 1696 | i915_gem_object_move_to_flushing(struct drm_gem_object *obj) | 
|  | 1697 | { | 
|  | 1698 | struct drm_device *dev = obj->dev; | 
|  | 1699 | drm_i915_private_t *dev_priv = dev->dev_private; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 1700 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Eric Anholt | ce44b0e | 2008-11-06 16:00:31 -0800 | [diff] [blame] | 1701 |  | 
|  | 1702 | BUG_ON(!obj_priv->active); | 
| Chris Wilson | 69dc498 | 2010-10-19 10:36:51 +0100 | [diff] [blame] | 1703 | list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list); | 
|  | 1704 | list_del_init(&obj_priv->ring_list); | 
| Eric Anholt | ce44b0e | 2008-11-06 16:00:31 -0800 | [diff] [blame] | 1705 | obj_priv->last_rendering_seqno = 0; | 
|  | 1706 | } | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1707 |  | 
| Chris Wilson | 963b483 | 2009-09-20 23:03:54 +0100 | [diff] [blame] | 1708 | /* Immediately discard the backing storage */ | 
|  | 1709 | static void | 
|  | 1710 | i915_gem_object_truncate(struct drm_gem_object *obj) | 
|  | 1711 | { | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 1712 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Chris Wilson | bb6baf7 | 2009-09-22 14:24:13 +0100 | [diff] [blame] | 1713 | struct inode *inode; | 
| Chris Wilson | 963b483 | 2009-09-20 23:03:54 +0100 | [diff] [blame] | 1714 |  | 
| Chris Wilson | ae9fed6 | 2010-08-07 11:01:30 +0100 | [diff] [blame] | 1715 | /* Our goal here is to return as much of the memory as | 
|  | 1716 | * is possible back to the system as we are called from OOM. | 
|  | 1717 | * To do this we must instruct the shmfs to drop all of its | 
|  | 1718 | * backing pages, *now*. Here we mirror the actions taken | 
|  | 1719 | * when by shmem_delete_inode() to release the backing store. | 
|  | 1720 | */ | 
| Chris Wilson | bb6baf7 | 2009-09-22 14:24:13 +0100 | [diff] [blame] | 1721 | inode = obj->filp->f_path.dentry->d_inode; | 
| Chris Wilson | ae9fed6 | 2010-08-07 11:01:30 +0100 | [diff] [blame] | 1722 | truncate_inode_pages(inode->i_mapping, 0); | 
|  | 1723 | if (inode->i_op->truncate_range) | 
|  | 1724 | inode->i_op->truncate_range(inode, 0, (loff_t)-1); | 
| Chris Wilson | bb6baf7 | 2009-09-22 14:24:13 +0100 | [diff] [blame] | 1725 |  | 
|  | 1726 | obj_priv->madv = __I915_MADV_PURGED; | 
| Chris Wilson | 963b483 | 2009-09-20 23:03:54 +0100 | [diff] [blame] | 1727 | } | 
|  | 1728 |  | 
|  | 1729 | static inline int | 
|  | 1730 | i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv) | 
|  | 1731 | { | 
|  | 1732 | return obj_priv->madv == I915_MADV_DONTNEED; | 
|  | 1733 | } | 
|  | 1734 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1735 | static void | 
|  | 1736 | i915_gem_object_move_to_inactive(struct drm_gem_object *obj) | 
|  | 1737 | { | 
|  | 1738 | struct drm_device *dev = obj->dev; | 
|  | 1739 | drm_i915_private_t *dev_priv = dev->dev_private; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 1740 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1741 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1742 | if (obj_priv->pin_count != 0) | 
| Chris Wilson | 69dc498 | 2010-10-19 10:36:51 +0100 | [diff] [blame] | 1743 | list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1744 | else | 
| Chris Wilson | 69dc498 | 2010-10-19 10:36:51 +0100 | [diff] [blame] | 1745 | list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); | 
|  | 1746 | list_del_init(&obj_priv->ring_list); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1747 |  | 
| Daniel Vetter | 99fcb76 | 2010-02-07 16:20:18 +0100 | [diff] [blame] | 1748 | BUG_ON(!list_empty(&obj_priv->gpu_write_list)); | 
|  | 1749 |  | 
| Eric Anholt | ce44b0e | 2008-11-06 16:00:31 -0800 | [diff] [blame] | 1750 | obj_priv->last_rendering_seqno = 0; | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1751 | obj_priv->ring = NULL; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1752 | if (obj_priv->active) { | 
|  | 1753 | obj_priv->active = 0; | 
|  | 1754 | drm_gem_object_unreference(obj); | 
|  | 1755 | } | 
| Chris Wilson | 23bc598 | 2010-09-29 16:10:57 +0100 | [diff] [blame] | 1756 | WARN_ON(i915_verify_lists(dev)); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1757 | } | 
|  | 1758 |  | 
| Daniel Vetter | 6356039 | 2010-02-19 11:51:59 +0100 | [diff] [blame] | 1759 | static void | 
|  | 1760 | i915_gem_process_flushing_list(struct drm_device *dev, | 
| Daniel Vetter | 8a1a49f | 2010-02-11 22:29:04 +0100 | [diff] [blame] | 1761 | uint32_t flush_domains, | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1762 | struct intel_ring_buffer *ring) | 
| Daniel Vetter | 6356039 | 2010-02-19 11:51:59 +0100 | [diff] [blame] | 1763 | { | 
|  | 1764 | drm_i915_private_t *dev_priv = dev->dev_private; | 
|  | 1765 | struct drm_i915_gem_object *obj_priv, *next; | 
|  | 1766 |  | 
|  | 1767 | list_for_each_entry_safe(obj_priv, next, | 
| Chris Wilson | 6419340 | 2010-10-24 12:38:05 +0100 | [diff] [blame] | 1768 | &ring->gpu_write_list, | 
| Daniel Vetter | 6356039 | 2010-02-19 11:51:59 +0100 | [diff] [blame] | 1769 | gpu_write_list) { | 
| Daniel Vetter | a8089e8 | 2010-04-09 19:05:09 +0000 | [diff] [blame] | 1770 | struct drm_gem_object *obj = &obj_priv->base; | 
| Daniel Vetter | 6356039 | 2010-02-19 11:51:59 +0100 | [diff] [blame] | 1771 |  | 
| Chris Wilson | 6419340 | 2010-10-24 12:38:05 +0100 | [diff] [blame] | 1772 | if (obj->write_domain & flush_domains) { | 
| Daniel Vetter | 6356039 | 2010-02-19 11:51:59 +0100 | [diff] [blame] | 1773 | uint32_t old_write_domain = obj->write_domain; | 
|  | 1774 |  | 
|  | 1775 | obj->write_domain = 0; | 
|  | 1776 | list_del_init(&obj_priv->gpu_write_list); | 
| Daniel Vetter | 617dbe2 | 2010-02-11 22:16:02 +0100 | [diff] [blame] | 1777 | i915_gem_object_move_to_active(obj, ring); | 
| Daniel Vetter | 6356039 | 2010-02-19 11:51:59 +0100 | [diff] [blame] | 1778 |  | 
|  | 1779 | /* update the fence lru list */ | 
| Daniel Vetter | 007cc8a | 2010-04-28 11:02:31 +0200 | [diff] [blame] | 1780 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { | 
|  | 1781 | struct drm_i915_fence_reg *reg = | 
|  | 1782 | &dev_priv->fence_regs[obj_priv->fence_reg]; | 
|  | 1783 | list_move_tail(®->lru_list, | 
| Daniel Vetter | 6356039 | 2010-02-19 11:51:59 +0100 | [diff] [blame] | 1784 | &dev_priv->mm.fence_list); | 
| Daniel Vetter | 007cc8a | 2010-04-28 11:02:31 +0200 | [diff] [blame] | 1785 | } | 
| Daniel Vetter | 6356039 | 2010-02-19 11:51:59 +0100 | [diff] [blame] | 1786 |  | 
|  | 1787 | trace_i915_gem_object_change_domain(obj, | 
|  | 1788 | obj->read_domains, | 
|  | 1789 | old_write_domain); | 
|  | 1790 | } | 
|  | 1791 | } | 
|  | 1792 | } | 
| Zou Nan hai | 8187a2b | 2010-05-21 09:08:55 +0800 | [diff] [blame] | 1793 |  | 
| Chris Wilson | 3cce469 | 2010-10-27 16:11:02 +0100 | [diff] [blame] | 1794 | int | 
| Daniel Vetter | 8a1a49f | 2010-02-11 22:29:04 +0100 | [diff] [blame] | 1795 | i915_add_request(struct drm_device *dev, | 
| Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 1796 | struct drm_file *file, | 
| Chris Wilson | 8dc5d14 | 2010-08-12 12:36:12 +0100 | [diff] [blame] | 1797 | struct drm_i915_gem_request *request, | 
| Daniel Vetter | 8a1a49f | 2010-02-11 22:29:04 +0100 | [diff] [blame] | 1798 | struct intel_ring_buffer *ring) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1799 | { | 
|  | 1800 | drm_i915_private_t *dev_priv = dev->dev_private; | 
| Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 1801 | struct drm_i915_file_private *file_priv = NULL; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1802 | uint32_t seqno; | 
|  | 1803 | int was_empty; | 
| Chris Wilson | 3cce469 | 2010-10-27 16:11:02 +0100 | [diff] [blame] | 1804 | int ret; | 
|  | 1805 |  | 
|  | 1806 | BUG_ON(request == NULL); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1807 |  | 
| Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 1808 | if (file != NULL) | 
|  | 1809 | file_priv = file->driver_priv; | 
| Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 1810 |  | 
| Chris Wilson | 3cce469 | 2010-10-27 16:11:02 +0100 | [diff] [blame] | 1811 | ret = ring->add_request(ring, &seqno); | 
|  | 1812 | if (ret) | 
|  | 1813 | return ret; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1814 |  | 
| Chris Wilson | a56ba56 | 2010-09-28 10:07:56 +0100 | [diff] [blame] | 1815 | ring->outstanding_lazy_request = false; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1816 |  | 
|  | 1817 | request->seqno = seqno; | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1818 | request->ring = ring; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1819 | request->emitted_jiffies = jiffies; | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1820 | was_empty = list_empty(&ring->request_list); | 
|  | 1821 | list_add_tail(&request->list, &ring->request_list); | 
|  | 1822 |  | 
| Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 1823 | if (file_priv) { | 
| Chris Wilson | 1c25595 | 2010-09-26 11:03:27 +0100 | [diff] [blame] | 1824 | spin_lock(&file_priv->mm.lock); | 
| Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 1825 | request->file_priv = file_priv; | 
| Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 1826 | list_add_tail(&request->client_list, | 
| Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 1827 | &file_priv->mm.request_list); | 
| Chris Wilson | 1c25595 | 2010-09-26 11:03:27 +0100 | [diff] [blame] | 1828 | spin_unlock(&file_priv->mm.lock); | 
| Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 1829 | } | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1830 |  | 
| Ben Gamari | f65d942 | 2009-09-14 17:48:44 -0400 | [diff] [blame] | 1831 | if (!dev_priv->mm.suspended) { | 
| Chris Wilson | b3b079d | 2010-09-13 23:44:34 +0100 | [diff] [blame] | 1832 | mod_timer(&dev_priv->hangcheck_timer, | 
|  | 1833 | jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); | 
| Ben Gamari | f65d942 | 2009-09-14 17:48:44 -0400 | [diff] [blame] | 1834 | if (was_empty) | 
| Chris Wilson | b3b079d | 2010-09-13 23:44:34 +0100 | [diff] [blame] | 1835 | queue_delayed_work(dev_priv->wq, | 
|  | 1836 | &dev_priv->mm.retire_work, HZ); | 
| Ben Gamari | f65d942 | 2009-09-14 17:48:44 -0400 | [diff] [blame] | 1837 | } | 
| Chris Wilson | 3cce469 | 2010-10-27 16:11:02 +0100 | [diff] [blame] | 1838 | return 0; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1839 | } | 
|  | 1840 |  | 
|  | 1841 | /** | 
|  | 1842 | * Command execution barrier | 
|  | 1843 | * | 
|  | 1844 | * Ensures that all commands in the ring are finished | 
|  | 1845 | * before signalling the CPU | 
|  | 1846 | */ | 
| Daniel Vetter | 8a1a49f | 2010-02-11 22:29:04 +0100 | [diff] [blame] | 1847 | static void | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1848 | i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1849 | { | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1850 | uint32_t flush_domains = 0; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1851 |  | 
|  | 1852 | /* The sampler always gets flushed on i965 (sigh) */ | 
| Chris Wilson | a6c45cf | 2010-09-17 00:32:17 +0100 | [diff] [blame] | 1853 | if (INTEL_INFO(dev)->gen >= 4) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1854 | flush_domains |= I915_GEM_DOMAIN_SAMPLER; | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1855 |  | 
| Chris Wilson | 78501ea | 2010-10-27 12:18:21 +0100 | [diff] [blame] | 1856 | ring->flush(ring, I915_GEM_DOMAIN_COMMAND, flush_domains); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1857 | } | 
|  | 1858 |  | 
| Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 1859 | static inline void | 
|  | 1860 | i915_gem_request_remove_from_client(struct drm_i915_gem_request *request) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1861 | { | 
| Chris Wilson | 1c25595 | 2010-09-26 11:03:27 +0100 | [diff] [blame] | 1862 | struct drm_i915_file_private *file_priv = request->file_priv; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1863 |  | 
| Chris Wilson | 1c25595 | 2010-09-26 11:03:27 +0100 | [diff] [blame] | 1864 | if (!file_priv) | 
|  | 1865 | return; | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 1866 |  | 
| Chris Wilson | 1c25595 | 2010-09-26 11:03:27 +0100 | [diff] [blame] | 1867 | spin_lock(&file_priv->mm.lock); | 
|  | 1868 | list_del(&request->client_list); | 
|  | 1869 | request->file_priv = NULL; | 
|  | 1870 | spin_unlock(&file_priv->mm.lock); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1871 | } | 
|  | 1872 |  | 
| Chris Wilson | dfaae39 | 2010-09-22 10:31:52 +0100 | [diff] [blame] | 1873 | static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, | 
|  | 1874 | struct intel_ring_buffer *ring) | 
| Chris Wilson | 9375e44 | 2010-09-19 12:21:28 +0100 | [diff] [blame] | 1875 | { | 
| Chris Wilson | dfaae39 | 2010-09-22 10:31:52 +0100 | [diff] [blame] | 1876 | while (!list_empty(&ring->request_list)) { | 
|  | 1877 | struct drm_i915_gem_request *request; | 
| Chris Wilson | 9375e44 | 2010-09-19 12:21:28 +0100 | [diff] [blame] | 1878 |  | 
| Chris Wilson | dfaae39 | 2010-09-22 10:31:52 +0100 | [diff] [blame] | 1879 | request = list_first_entry(&ring->request_list, | 
|  | 1880 | struct drm_i915_gem_request, | 
|  | 1881 | list); | 
|  | 1882 |  | 
|  | 1883 | list_del(&request->list); | 
| Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 1884 | i915_gem_request_remove_from_client(request); | 
| Chris Wilson | dfaae39 | 2010-09-22 10:31:52 +0100 | [diff] [blame] | 1885 | kfree(request); | 
|  | 1886 | } | 
|  | 1887 |  | 
|  | 1888 | while (!list_empty(&ring->active_list)) { | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1889 | struct drm_i915_gem_object *obj_priv; | 
|  | 1890 |  | 
| Chris Wilson | dfaae39 | 2010-09-22 10:31:52 +0100 | [diff] [blame] | 1891 | obj_priv = list_first_entry(&ring->active_list, | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1892 | struct drm_i915_gem_object, | 
| Chris Wilson | 69dc498 | 2010-10-19 10:36:51 +0100 | [diff] [blame] | 1893 | ring_list); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1894 |  | 
| Chris Wilson | dfaae39 | 2010-09-22 10:31:52 +0100 | [diff] [blame] | 1895 | obj_priv->base.write_domain = 0; | 
|  | 1896 | list_del_init(&obj_priv->gpu_write_list); | 
|  | 1897 | i915_gem_object_move_to_inactive(&obj_priv->base); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1898 | } | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1899 | } | 
|  | 1900 |  | 
| Chris Wilson | 069efc1 | 2010-09-30 16:53:18 +0100 | [diff] [blame] | 1901 | void i915_gem_reset(struct drm_device *dev) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1902 | { | 
| Chris Wilson | dfaae39 | 2010-09-22 10:31:52 +0100 | [diff] [blame] | 1903 | struct drm_i915_private *dev_priv = dev->dev_private; | 
|  | 1904 | struct drm_i915_gem_object *obj_priv; | 
| Chris Wilson | 069efc1 | 2010-09-30 16:53:18 +0100 | [diff] [blame] | 1905 | int i; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1906 |  | 
| Chris Wilson | dfaae39 | 2010-09-22 10:31:52 +0100 | [diff] [blame] | 1907 | i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring); | 
| Chris Wilson | 87acb0a | 2010-10-19 10:13:00 +0100 | [diff] [blame] | 1908 | i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring); | 
| Chris Wilson | 549f736 | 2010-10-19 11:19:32 +0100 | [diff] [blame] | 1909 | i915_gem_reset_ring_lists(dev_priv, &dev_priv->blt_ring); | 
| Chris Wilson | dfaae39 | 2010-09-22 10:31:52 +0100 | [diff] [blame] | 1910 |  | 
|  | 1911 | /* Remove anything from the flushing lists. The GPU cache is likely | 
|  | 1912 | * to be lost on reset along with the data, so simply move the | 
|  | 1913 | * lost bo to the inactive list. | 
|  | 1914 | */ | 
|  | 1915 | while (!list_empty(&dev_priv->mm.flushing_list)) { | 
| Chris Wilson | 9375e44 | 2010-09-19 12:21:28 +0100 | [diff] [blame] | 1916 | obj_priv = list_first_entry(&dev_priv->mm.flushing_list, | 
|  | 1917 | struct drm_i915_gem_object, | 
| Chris Wilson | 69dc498 | 2010-10-19 10:36:51 +0100 | [diff] [blame] | 1918 | mm_list); | 
| Chris Wilson | 9375e44 | 2010-09-19 12:21:28 +0100 | [diff] [blame] | 1919 |  | 
|  | 1920 | obj_priv->base.write_domain = 0; | 
| Chris Wilson | dfaae39 | 2010-09-22 10:31:52 +0100 | [diff] [blame] | 1921 | list_del_init(&obj_priv->gpu_write_list); | 
| Chris Wilson | 9375e44 | 2010-09-19 12:21:28 +0100 | [diff] [blame] | 1922 | i915_gem_object_move_to_inactive(&obj_priv->base); | 
|  | 1923 | } | 
| Chris Wilson | 9375e44 | 2010-09-19 12:21:28 +0100 | [diff] [blame] | 1924 |  | 
| Chris Wilson | dfaae39 | 2010-09-22 10:31:52 +0100 | [diff] [blame] | 1925 | /* Move everything out of the GPU domains to ensure we do any | 
|  | 1926 | * necessary invalidation upon reuse. | 
|  | 1927 | */ | 
| Chris Wilson | 77f0123 | 2010-09-19 12:31:36 +0100 | [diff] [blame] | 1928 | list_for_each_entry(obj_priv, | 
|  | 1929 | &dev_priv->mm.inactive_list, | 
| Chris Wilson | 69dc498 | 2010-10-19 10:36:51 +0100 | [diff] [blame] | 1930 | mm_list) | 
| Chris Wilson | 77f0123 | 2010-09-19 12:31:36 +0100 | [diff] [blame] | 1931 | { | 
|  | 1932 | obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS; | 
|  | 1933 | } | 
| Chris Wilson | 069efc1 | 2010-09-30 16:53:18 +0100 | [diff] [blame] | 1934 |  | 
|  | 1935 | /* The fence registers are invalidated so clear them out */ | 
|  | 1936 | for (i = 0; i < 16; i++) { | 
|  | 1937 | struct drm_i915_fence_reg *reg; | 
|  | 1938 |  | 
|  | 1939 | reg = &dev_priv->fence_regs[i]; | 
|  | 1940 | if (!reg->obj) | 
|  | 1941 | continue; | 
|  | 1942 |  | 
|  | 1943 | i915_gem_clear_fence_reg(reg->obj); | 
|  | 1944 | } | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1945 | } | 
|  | 1946 |  | 
|  | 1947 | /** | 
|  | 1948 | * This function clears the request list as sequence numbers are passed. | 
|  | 1949 | */ | 
| Chris Wilson | b09a1fe | 2010-07-23 23:18:49 +0100 | [diff] [blame] | 1950 | static void | 
|  | 1951 | i915_gem_retire_requests_ring(struct drm_device *dev, | 
|  | 1952 | struct intel_ring_buffer *ring) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1953 | { | 
|  | 1954 | drm_i915_private_t *dev_priv = dev->dev_private; | 
|  | 1955 | uint32_t seqno; | 
|  | 1956 |  | 
| Chris Wilson | b84d5f0 | 2010-09-18 01:38:04 +0100 | [diff] [blame] | 1957 | if (!ring->status_page.page_addr || | 
|  | 1958 | list_empty(&ring->request_list)) | 
| Karsten Wiese | 6c0594a | 2009-02-23 15:07:57 +0100 | [diff] [blame] | 1959 | return; | 
|  | 1960 |  | 
| Chris Wilson | 23bc598 | 2010-09-29 16:10:57 +0100 | [diff] [blame] | 1961 | WARN_ON(i915_verify_lists(dev)); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1962 |  | 
| Chris Wilson | 78501ea | 2010-10-27 12:18:21 +0100 | [diff] [blame] | 1963 | seqno = ring->get_seqno(ring); | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1964 | while (!list_empty(&ring->request_list)) { | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1965 | struct drm_i915_gem_request *request; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1966 |  | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1967 | request = list_first_entry(&ring->request_list, | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1968 | struct drm_i915_gem_request, | 
|  | 1969 | list); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1970 |  | 
| Chris Wilson | dfaae39 | 2010-09-22 10:31:52 +0100 | [diff] [blame] | 1971 | if (!i915_seqno_passed(seqno, request->seqno)) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1972 | break; | 
| Chris Wilson | b84d5f0 | 2010-09-18 01:38:04 +0100 | [diff] [blame] | 1973 |  | 
|  | 1974 | trace_i915_gem_request_retire(dev, request->seqno); | 
|  | 1975 |  | 
|  | 1976 | list_del(&request->list); | 
| Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 1977 | i915_gem_request_remove_from_client(request); | 
| Chris Wilson | b84d5f0 | 2010-09-18 01:38:04 +0100 | [diff] [blame] | 1978 | kfree(request); | 
|  | 1979 | } | 
|  | 1980 |  | 
|  | 1981 | /* Move any buffers on the active list that are no longer referenced | 
|  | 1982 | * by the ringbuffer to the flushing/inactive lists as appropriate. | 
|  | 1983 | */ | 
|  | 1984 | while (!list_empty(&ring->active_list)) { | 
|  | 1985 | struct drm_gem_object *obj; | 
|  | 1986 | struct drm_i915_gem_object *obj_priv; | 
|  | 1987 |  | 
|  | 1988 | obj_priv = list_first_entry(&ring->active_list, | 
|  | 1989 | struct drm_i915_gem_object, | 
| Chris Wilson | 69dc498 | 2010-10-19 10:36:51 +0100 | [diff] [blame] | 1990 | ring_list); | 
| Chris Wilson | b84d5f0 | 2010-09-18 01:38:04 +0100 | [diff] [blame] | 1991 |  | 
| Chris Wilson | dfaae39 | 2010-09-22 10:31:52 +0100 | [diff] [blame] | 1992 | if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno)) | 
| Chris Wilson | b84d5f0 | 2010-09-18 01:38:04 +0100 | [diff] [blame] | 1993 | break; | 
|  | 1994 |  | 
|  | 1995 | obj = &obj_priv->base; | 
| Chris Wilson | b84d5f0 | 2010-09-18 01:38:04 +0100 | [diff] [blame] | 1996 | if (obj->write_domain != 0) | 
|  | 1997 | i915_gem_object_move_to_flushing(obj); | 
|  | 1998 | else | 
|  | 1999 | i915_gem_object_move_to_inactive(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2000 | } | 
| Chris Wilson | 9d34e5d | 2009-09-24 05:26:06 +0100 | [diff] [blame] | 2001 |  | 
|  | 2002 | if (unlikely (dev_priv->trace_irq_seqno && | 
|  | 2003 | i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) { | 
| Chris Wilson | 78501ea | 2010-10-27 12:18:21 +0100 | [diff] [blame] | 2004 | ring->user_irq_put(ring); | 
| Chris Wilson | 9d34e5d | 2009-09-24 05:26:06 +0100 | [diff] [blame] | 2005 | dev_priv->trace_irq_seqno = 0; | 
|  | 2006 | } | 
| Chris Wilson | 23bc598 | 2010-09-29 16:10:57 +0100 | [diff] [blame] | 2007 |  | 
|  | 2008 | WARN_ON(i915_verify_lists(dev)); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2009 | } | 
|  | 2010 |  | 
|  | 2011 | void | 
| Chris Wilson | b09a1fe | 2010-07-23 23:18:49 +0100 | [diff] [blame] | 2012 | i915_gem_retire_requests(struct drm_device *dev) | 
|  | 2013 | { | 
|  | 2014 | drm_i915_private_t *dev_priv = dev->dev_private; | 
|  | 2015 |  | 
| Chris Wilson | be72615 | 2010-07-23 23:18:50 +0100 | [diff] [blame] | 2016 | if (!list_empty(&dev_priv->mm.deferred_free_list)) { | 
|  | 2017 | struct drm_i915_gem_object *obj_priv, *tmp; | 
|  | 2018 |  | 
|  | 2019 | /* We must be careful that during unbind() we do not | 
|  | 2020 | * accidentally infinitely recurse into retire requests. | 
|  | 2021 | * Currently: | 
|  | 2022 | *   retire -> free -> unbind -> wait -> retire_ring | 
|  | 2023 | */ | 
|  | 2024 | list_for_each_entry_safe(obj_priv, tmp, | 
|  | 2025 | &dev_priv->mm.deferred_free_list, | 
| Chris Wilson | 69dc498 | 2010-10-19 10:36:51 +0100 | [diff] [blame] | 2026 | mm_list) | 
| Chris Wilson | be72615 | 2010-07-23 23:18:50 +0100 | [diff] [blame] | 2027 | i915_gem_free_object_tail(&obj_priv->base); | 
|  | 2028 | } | 
|  | 2029 |  | 
| Chris Wilson | b09a1fe | 2010-07-23 23:18:49 +0100 | [diff] [blame] | 2030 | i915_gem_retire_requests_ring(dev, &dev_priv->render_ring); | 
| Chris Wilson | 87acb0a | 2010-10-19 10:13:00 +0100 | [diff] [blame] | 2031 | i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring); | 
| Chris Wilson | 549f736 | 2010-10-19 11:19:32 +0100 | [diff] [blame] | 2032 | i915_gem_retire_requests_ring(dev, &dev_priv->blt_ring); | 
| Chris Wilson | b09a1fe | 2010-07-23 23:18:49 +0100 | [diff] [blame] | 2033 | } | 
|  | 2034 |  | 
| Daniel Vetter | 75ef9da | 2010-08-21 00:25:16 +0200 | [diff] [blame] | 2035 | static void | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2036 | i915_gem_retire_work_handler(struct work_struct *work) | 
|  | 2037 | { | 
|  | 2038 | drm_i915_private_t *dev_priv; | 
|  | 2039 | struct drm_device *dev; | 
|  | 2040 |  | 
|  | 2041 | dev_priv = container_of(work, drm_i915_private_t, | 
|  | 2042 | mm.retire_work.work); | 
|  | 2043 | dev = dev_priv->dev; | 
|  | 2044 |  | 
| Chris Wilson | 891b48c | 2010-09-29 12:26:37 +0100 | [diff] [blame] | 2045 | /* Come back later if the device is busy... */ | 
|  | 2046 | if (!mutex_trylock(&dev->struct_mutex)) { | 
|  | 2047 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); | 
|  | 2048 | return; | 
|  | 2049 | } | 
|  | 2050 |  | 
| Chris Wilson | b09a1fe | 2010-07-23 23:18:49 +0100 | [diff] [blame] | 2051 | i915_gem_retire_requests(dev); | 
| Zou Nan hai | d1b851f | 2010-05-21 09:08:57 +0800 | [diff] [blame] | 2052 |  | 
| Keith Packard | 6dbe277 | 2008-10-14 21:41:13 -0700 | [diff] [blame] | 2053 | if (!dev_priv->mm.suspended && | 
| Zou Nan hai | d1b851f | 2010-05-21 09:08:57 +0800 | [diff] [blame] | 2054 | (!list_empty(&dev_priv->render_ring.request_list) || | 
| Chris Wilson | 549f736 | 2010-10-19 11:19:32 +0100 | [diff] [blame] | 2055 | !list_empty(&dev_priv->bsd_ring.request_list) || | 
|  | 2056 | !list_empty(&dev_priv->blt_ring.request_list))) | 
| Eric Anholt | 9c9fe1f | 2009-08-03 16:09:16 -0700 | [diff] [blame] | 2057 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2058 | mutex_unlock(&dev->struct_mutex); | 
|  | 2059 | } | 
|  | 2060 |  | 
| Daniel Vetter | 5a5a0c6 | 2009-09-15 22:57:36 +0200 | [diff] [blame] | 2061 | int | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 2062 | i915_do_wait_request(struct drm_device *dev, uint32_t seqno, | 
| Daniel Vetter | 8a1a49f | 2010-02-11 22:29:04 +0100 | [diff] [blame] | 2063 | bool interruptible, struct intel_ring_buffer *ring) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2064 | { | 
|  | 2065 | drm_i915_private_t *dev_priv = dev->dev_private; | 
| Jesse Barnes | 802c7eb | 2009-05-05 16:03:48 -0700 | [diff] [blame] | 2066 | u32 ier; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2067 | int ret = 0; | 
|  | 2068 |  | 
|  | 2069 | BUG_ON(seqno == 0); | 
|  | 2070 |  | 
| Ben Gamari | ba1234d | 2009-09-14 17:48:47 -0400 | [diff] [blame] | 2071 | if (atomic_read(&dev_priv->mm.wedged)) | 
| Chris Wilson | 30dbf0c | 2010-09-25 10:19:17 +0100 | [diff] [blame] | 2072 | return -EAGAIN; | 
| Ben Gamari | ffed1d0 | 2009-09-14 17:48:41 -0400 | [diff] [blame] | 2073 |  | 
| Chris Wilson | a56ba56 | 2010-09-28 10:07:56 +0100 | [diff] [blame] | 2074 | if (ring->outstanding_lazy_request) { | 
| Chris Wilson | 3cce469 | 2010-10-27 16:11:02 +0100 | [diff] [blame] | 2075 | struct drm_i915_gem_request *request; | 
|  | 2076 |  | 
|  | 2077 | request = kzalloc(sizeof(*request), GFP_KERNEL); | 
|  | 2078 | if (request == NULL) | 
| Daniel Vetter | e35a41d | 2010-02-11 22:13:59 +0100 | [diff] [blame] | 2079 | return -ENOMEM; | 
| Chris Wilson | 3cce469 | 2010-10-27 16:11:02 +0100 | [diff] [blame] | 2080 |  | 
|  | 2081 | ret = i915_add_request(dev, NULL, request, ring); | 
|  | 2082 | if (ret) { | 
|  | 2083 | kfree(request); | 
|  | 2084 | return ret; | 
|  | 2085 | } | 
|  | 2086 |  | 
|  | 2087 | seqno = request->seqno; | 
| Daniel Vetter | e35a41d | 2010-02-11 22:13:59 +0100 | [diff] [blame] | 2088 | } | 
| Chris Wilson | a56ba56 | 2010-09-28 10:07:56 +0100 | [diff] [blame] | 2089 | BUG_ON(seqno == dev_priv->next_seqno); | 
| Daniel Vetter | e35a41d | 2010-02-11 22:13:59 +0100 | [diff] [blame] | 2090 |  | 
| Chris Wilson | 78501ea | 2010-10-27 12:18:21 +0100 | [diff] [blame] | 2091 | if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) { | 
| Eric Anholt | bad720f | 2009-10-22 16:11:14 -0700 | [diff] [blame] | 2092 | if (HAS_PCH_SPLIT(dev)) | 
| Zhenyu Wang | 036a4a7 | 2009-06-08 14:40:19 +0800 | [diff] [blame] | 2093 | ier = I915_READ(DEIER) | I915_READ(GTIER); | 
|  | 2094 | else | 
|  | 2095 | ier = I915_READ(IER); | 
| Jesse Barnes | 802c7eb | 2009-05-05 16:03:48 -0700 | [diff] [blame] | 2096 | if (!ier) { | 
|  | 2097 | DRM_ERROR("something (likely vbetool) disabled " | 
|  | 2098 | "interrupts, re-enabling\n"); | 
|  | 2099 | i915_driver_irq_preinstall(dev); | 
|  | 2100 | i915_driver_irq_postinstall(dev); | 
|  | 2101 | } | 
|  | 2102 |  | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 2103 | trace_i915_gem_request_wait_begin(dev, seqno); | 
|  | 2104 |  | 
| Chris Wilson | b222349 | 2010-10-27 15:27:33 +0100 | [diff] [blame] | 2105 | ring->waiting_seqno = seqno; | 
| Chris Wilson | 78501ea | 2010-10-27 12:18:21 +0100 | [diff] [blame] | 2106 | ring->user_irq_get(ring); | 
| Daniel Vetter | 48764bf | 2009-09-15 22:57:32 +0200 | [diff] [blame] | 2107 | if (interruptible) | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 2108 | ret = wait_event_interruptible(ring->irq_queue, | 
| Chris Wilson | 78501ea | 2010-10-27 12:18:21 +0100 | [diff] [blame] | 2109 | i915_seqno_passed(ring->get_seqno(ring), seqno) | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 2110 | || atomic_read(&dev_priv->mm.wedged)); | 
| Daniel Vetter | 48764bf | 2009-09-15 22:57:32 +0200 | [diff] [blame] | 2111 | else | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 2112 | wait_event(ring->irq_queue, | 
| Chris Wilson | 78501ea | 2010-10-27 12:18:21 +0100 | [diff] [blame] | 2113 | i915_seqno_passed(ring->get_seqno(ring), seqno) | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 2114 | || atomic_read(&dev_priv->mm.wedged)); | 
| Daniel Vetter | 48764bf | 2009-09-15 22:57:32 +0200 | [diff] [blame] | 2115 |  | 
| Chris Wilson | 78501ea | 2010-10-27 12:18:21 +0100 | [diff] [blame] | 2116 | ring->user_irq_put(ring); | 
| Chris Wilson | b222349 | 2010-10-27 15:27:33 +0100 | [diff] [blame] | 2117 | ring->waiting_seqno = 0; | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 2118 |  | 
|  | 2119 | trace_i915_gem_request_wait_end(dev, seqno); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2120 | } | 
| Ben Gamari | ba1234d | 2009-09-14 17:48:47 -0400 | [diff] [blame] | 2121 | if (atomic_read(&dev_priv->mm.wedged)) | 
| Chris Wilson | 30dbf0c | 2010-09-25 10:19:17 +0100 | [diff] [blame] | 2122 | ret = -EAGAIN; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2123 |  | 
|  | 2124 | if (ret && ret != -ERESTARTSYS) | 
| Daniel Vetter | 8bff917 | 2010-02-11 22:19:40 +0100 | [diff] [blame] | 2125 | DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n", | 
| Chris Wilson | 78501ea | 2010-10-27 12:18:21 +0100 | [diff] [blame] | 2126 | __func__, ret, seqno, ring->get_seqno(ring), | 
| Daniel Vetter | 8bff917 | 2010-02-11 22:19:40 +0100 | [diff] [blame] | 2127 | dev_priv->next_seqno); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2128 |  | 
|  | 2129 | /* Directly dispatch request retiring.  While we have the work queue | 
|  | 2130 | * to handle this, the waiter on a request often wants an associated | 
|  | 2131 | * buffer to have made it to the inactive list, and we would need | 
|  | 2132 | * a separate wait queue to handle that. | 
|  | 2133 | */ | 
|  | 2134 | if (ret == 0) | 
| Chris Wilson | b09a1fe | 2010-07-23 23:18:49 +0100 | [diff] [blame] | 2135 | i915_gem_retire_requests_ring(dev, ring); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2136 |  | 
|  | 2137 | return ret; | 
|  | 2138 | } | 
|  | 2139 |  | 
| Daniel Vetter | 48764bf | 2009-09-15 22:57:32 +0200 | [diff] [blame] | 2140 | /** | 
|  | 2141 | * Waits for a sequence number to be signaled, and cleans up the | 
|  | 2142 | * request and object lists appropriately for that event. | 
|  | 2143 | */ | 
|  | 2144 | static int | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 2145 | i915_wait_request(struct drm_device *dev, uint32_t seqno, | 
| Chris Wilson | a56ba56 | 2010-09-28 10:07:56 +0100 | [diff] [blame] | 2146 | struct intel_ring_buffer *ring) | 
| Daniel Vetter | 48764bf | 2009-09-15 22:57:32 +0200 | [diff] [blame] | 2147 | { | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 2148 | return i915_do_wait_request(dev, seqno, 1, ring); | 
| Daniel Vetter | 48764bf | 2009-09-15 22:57:32 +0200 | [diff] [blame] | 2149 | } | 
|  | 2150 |  | 
| Zou Nan hai | 8187a2b | 2010-05-21 09:08:55 +0800 | [diff] [blame] | 2151 | static void | 
| Chris Wilson | 9220434 | 2010-09-18 11:02:01 +0100 | [diff] [blame] | 2152 | i915_gem_flush_ring(struct drm_device *dev, | 
| Chris Wilson | c78ec30 | 2010-09-20 12:50:23 +0100 | [diff] [blame] | 2153 | struct drm_file *file_priv, | 
| Chris Wilson | 9220434 | 2010-09-18 11:02:01 +0100 | [diff] [blame] | 2154 | struct intel_ring_buffer *ring, | 
|  | 2155 | uint32_t invalidate_domains, | 
|  | 2156 | uint32_t flush_domains) | 
|  | 2157 | { | 
| Chris Wilson | 78501ea | 2010-10-27 12:18:21 +0100 | [diff] [blame] | 2158 | ring->flush(ring, invalidate_domains, flush_domains); | 
| Chris Wilson | 9220434 | 2010-09-18 11:02:01 +0100 | [diff] [blame] | 2159 | i915_gem_process_flushing_list(dev, flush_domains, ring); | 
|  | 2160 | } | 
|  | 2161 |  | 
|  | 2162 | static void | 
| Zou Nan hai | 8187a2b | 2010-05-21 09:08:55 +0800 | [diff] [blame] | 2163 | i915_gem_flush(struct drm_device *dev, | 
| Chris Wilson | c78ec30 | 2010-09-20 12:50:23 +0100 | [diff] [blame] | 2164 | struct drm_file *file_priv, | 
| Zou Nan hai | 8187a2b | 2010-05-21 09:08:55 +0800 | [diff] [blame] | 2165 | uint32_t invalidate_domains, | 
| Chris Wilson | 9220434 | 2010-09-18 11:02:01 +0100 | [diff] [blame] | 2166 | uint32_t flush_domains, | 
|  | 2167 | uint32_t flush_rings) | 
| Zou Nan hai | 8187a2b | 2010-05-21 09:08:55 +0800 | [diff] [blame] | 2168 | { | 
|  | 2169 | drm_i915_private_t *dev_priv = dev->dev_private; | 
| Daniel Vetter | 8bff917 | 2010-02-11 22:19:40 +0100 | [diff] [blame] | 2170 |  | 
| Zou Nan hai | 8187a2b | 2010-05-21 09:08:55 +0800 | [diff] [blame] | 2171 | if (flush_domains & I915_GEM_DOMAIN_CPU) | 
|  | 2172 | drm_agp_chipset_flush(dev); | 
| Zou Nan hai | d1b851f | 2010-05-21 09:08:57 +0800 | [diff] [blame] | 2173 |  | 
| Chris Wilson | 9220434 | 2010-09-18 11:02:01 +0100 | [diff] [blame] | 2174 | if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) { | 
|  | 2175 | if (flush_rings & RING_RENDER) | 
| Chris Wilson | c78ec30 | 2010-09-20 12:50:23 +0100 | [diff] [blame] | 2176 | i915_gem_flush_ring(dev, file_priv, | 
| Chris Wilson | 9220434 | 2010-09-18 11:02:01 +0100 | [diff] [blame] | 2177 | &dev_priv->render_ring, | 
|  | 2178 | invalidate_domains, flush_domains); | 
|  | 2179 | if (flush_rings & RING_BSD) | 
| Chris Wilson | c78ec30 | 2010-09-20 12:50:23 +0100 | [diff] [blame] | 2180 | i915_gem_flush_ring(dev, file_priv, | 
| Chris Wilson | 9220434 | 2010-09-18 11:02:01 +0100 | [diff] [blame] | 2181 | &dev_priv->bsd_ring, | 
|  | 2182 | invalidate_domains, flush_domains); | 
| Chris Wilson | 549f736 | 2010-10-19 11:19:32 +0100 | [diff] [blame] | 2183 | if (flush_rings & RING_BLT) | 
|  | 2184 | i915_gem_flush_ring(dev, file_priv, | 
|  | 2185 | &dev_priv->blt_ring, | 
|  | 2186 | invalidate_domains, flush_domains); | 
| Chris Wilson | 9220434 | 2010-09-18 11:02:01 +0100 | [diff] [blame] | 2187 | } | 
| Zou Nan hai | 8187a2b | 2010-05-21 09:08:55 +0800 | [diff] [blame] | 2188 | } | 
|  | 2189 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2190 | /** | 
|  | 2191 | * Ensures that all rendering to the object has completed and the object is | 
|  | 2192 | * safe to unbind from the GTT or access from the CPU. | 
|  | 2193 | */ | 
|  | 2194 | static int | 
| Chris Wilson | 2cf34d7 | 2010-09-14 13:03:28 +0100 | [diff] [blame] | 2195 | i915_gem_object_wait_rendering(struct drm_gem_object *obj, | 
|  | 2196 | bool interruptible) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2197 | { | 
|  | 2198 | struct drm_device *dev = obj->dev; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 2199 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2200 | int ret; | 
|  | 2201 |  | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2202 | /* This function only exists to support waiting for existing rendering, | 
|  | 2203 | * not for emitting required flushes. | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2204 | */ | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2205 | BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2206 |  | 
|  | 2207 | /* If there is rendering queued on the buffer being evicted, wait for | 
|  | 2208 | * it. | 
|  | 2209 | */ | 
|  | 2210 | if (obj_priv->active) { | 
| Chris Wilson | 2cf34d7 | 2010-09-14 13:03:28 +0100 | [diff] [blame] | 2211 | ret = i915_do_wait_request(dev, | 
|  | 2212 | obj_priv->last_rendering_seqno, | 
|  | 2213 | interruptible, | 
|  | 2214 | obj_priv->ring); | 
|  | 2215 | if (ret) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2216 | return ret; | 
|  | 2217 | } | 
|  | 2218 |  | 
|  | 2219 | return 0; | 
|  | 2220 | } | 
|  | 2221 |  | 
|  | 2222 | /** | 
|  | 2223 | * Unbinds an object from the GTT aperture. | 
|  | 2224 | */ | 
| Jesse Barnes | 0f973f2 | 2009-01-26 17:10:45 -0800 | [diff] [blame] | 2225 | int | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2226 | i915_gem_object_unbind(struct drm_gem_object *obj) | 
|  | 2227 | { | 
|  | 2228 | struct drm_device *dev = obj->dev; | 
| Chris Wilson | 73aa808 | 2010-09-30 11:46:12 +0100 | [diff] [blame] | 2229 | struct drm_i915_private *dev_priv = dev->dev_private; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 2230 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2231 | int ret = 0; | 
|  | 2232 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2233 | if (obj_priv->gtt_space == NULL) | 
|  | 2234 | return 0; | 
|  | 2235 |  | 
|  | 2236 | if (obj_priv->pin_count != 0) { | 
|  | 2237 | DRM_ERROR("Attempting to unbind pinned buffer\n"); | 
|  | 2238 | return -EINVAL; | 
|  | 2239 | } | 
|  | 2240 |  | 
| Eric Anholt | 5323fd0 | 2009-09-09 11:50:45 -0700 | [diff] [blame] | 2241 | /* blow away mappings if mapped through GTT */ | 
|  | 2242 | i915_gem_release_mmap(obj); | 
|  | 2243 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2244 | /* Move the object to the CPU domain to ensure that | 
|  | 2245 | * any possible CPU writes while it's not in the GTT | 
|  | 2246 | * are flushed when we go to remap it. This will | 
|  | 2247 | * also ensure that all pending GPU writes are finished | 
|  | 2248 | * before we unbind. | 
|  | 2249 | */ | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2250 | ret = i915_gem_object_set_to_cpu_domain(obj, 1); | 
| Chris Wilson | 8dc1775 | 2010-07-23 23:18:51 +0100 | [diff] [blame] | 2251 | if (ret == -ERESTARTSYS) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2252 | return ret; | 
| Chris Wilson | 8dc1775 | 2010-07-23 23:18:51 +0100 | [diff] [blame] | 2253 | /* Continue on if we fail due to EIO, the GPU is hung so we | 
|  | 2254 | * should be safe and we need to cleanup or else we might | 
|  | 2255 | * cause memory corruption through use-after-free. | 
|  | 2256 | */ | 
| Chris Wilson | 812ed492 | 2010-09-30 15:08:57 +0100 | [diff] [blame] | 2257 | if (ret) { | 
|  | 2258 | i915_gem_clflush_object(obj); | 
|  | 2259 | obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU; | 
|  | 2260 | } | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2261 |  | 
| Daniel Vetter | 96b47b6 | 2009-12-15 17:50:00 +0100 | [diff] [blame] | 2262 | /* release the fence reg _after_ flushing */ | 
|  | 2263 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) | 
|  | 2264 | i915_gem_clear_fence_reg(obj); | 
|  | 2265 |  | 
| Chris Wilson | 73aa808 | 2010-09-30 11:46:12 +0100 | [diff] [blame] | 2266 | drm_unbind_agp(obj_priv->agp_mem); | 
|  | 2267 | drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2268 |  | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 2269 | i915_gem_object_put_pages_gtt(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2270 |  | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2271 | i915_gem_info_remove_gtt(dev_priv, obj_priv); | 
| Chris Wilson | 69dc498 | 2010-10-19 10:36:51 +0100 | [diff] [blame] | 2272 | list_del_init(&obj_priv->mm_list); | 
| Daniel Vetter | 75e9e91 | 2010-11-04 17:11:09 +0100 | [diff] [blame^] | 2273 | /* Avoid an unnecessary call to unbind on rebind. */ | 
|  | 2274 | obj_priv->map_and_fenceable = true; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2275 |  | 
| Chris Wilson | 73aa808 | 2010-09-30 11:46:12 +0100 | [diff] [blame] | 2276 | drm_mm_put_block(obj_priv->gtt_space); | 
|  | 2277 | obj_priv->gtt_space = NULL; | 
| Chris Wilson | 9af90d1 | 2010-10-17 10:01:56 +0100 | [diff] [blame] | 2278 | obj_priv->gtt_offset = 0; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2279 |  | 
| Chris Wilson | 963b483 | 2009-09-20 23:03:54 +0100 | [diff] [blame] | 2280 | if (i915_gem_object_is_purgeable(obj_priv)) | 
|  | 2281 | i915_gem_object_truncate(obj); | 
|  | 2282 |  | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 2283 | trace_i915_gem_object_unbind(obj); | 
|  | 2284 |  | 
| Chris Wilson | 8dc1775 | 2010-07-23 23:18:51 +0100 | [diff] [blame] | 2285 | return ret; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2286 | } | 
|  | 2287 |  | 
| Chris Wilson | a56ba56 | 2010-09-28 10:07:56 +0100 | [diff] [blame] | 2288 | static int i915_ring_idle(struct drm_device *dev, | 
|  | 2289 | struct intel_ring_buffer *ring) | 
|  | 2290 | { | 
| Chris Wilson | 395b70b | 2010-10-28 21:28:46 +0100 | [diff] [blame] | 2291 | if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list)) | 
| Chris Wilson | 6419340 | 2010-10-24 12:38:05 +0100 | [diff] [blame] | 2292 | return 0; | 
|  | 2293 |  | 
| Chris Wilson | a56ba56 | 2010-09-28 10:07:56 +0100 | [diff] [blame] | 2294 | i915_gem_flush_ring(dev, NULL, ring, | 
|  | 2295 | I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); | 
|  | 2296 | return i915_wait_request(dev, | 
|  | 2297 | i915_gem_next_request_seqno(dev, ring), | 
|  | 2298 | ring); | 
|  | 2299 | } | 
|  | 2300 |  | 
| Chris Wilson | b47eb4a | 2010-08-07 11:01:23 +0100 | [diff] [blame] | 2301 | int | 
| Daniel Vetter | 4df2faf | 2010-02-19 11:52:00 +0100 | [diff] [blame] | 2302 | i915_gpu_idle(struct drm_device *dev) | 
|  | 2303 | { | 
|  | 2304 | drm_i915_private_t *dev_priv = dev->dev_private; | 
|  | 2305 | bool lists_empty; | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 2306 | int ret; | 
| Daniel Vetter | 4df2faf | 2010-02-19 11:52:00 +0100 | [diff] [blame] | 2307 |  | 
| Zou Nan hai | d1b851f | 2010-05-21 09:08:57 +0800 | [diff] [blame] | 2308 | lists_empty = (list_empty(&dev_priv->mm.flushing_list) && | 
| Chris Wilson | 395b70b | 2010-10-28 21:28:46 +0100 | [diff] [blame] | 2309 | list_empty(&dev_priv->mm.active_list)); | 
| Daniel Vetter | 4df2faf | 2010-02-19 11:52:00 +0100 | [diff] [blame] | 2310 | if (lists_empty) | 
|  | 2311 | return 0; | 
|  | 2312 |  | 
|  | 2313 | /* Flush everything onto the inactive list. */ | 
| Chris Wilson | a56ba56 | 2010-09-28 10:07:56 +0100 | [diff] [blame] | 2314 | ret = i915_ring_idle(dev, &dev_priv->render_ring); | 
| Daniel Vetter | 8a1a49f | 2010-02-11 22:29:04 +0100 | [diff] [blame] | 2315 | if (ret) | 
|  | 2316 | return ret; | 
| Zou Nan hai | d1b851f | 2010-05-21 09:08:57 +0800 | [diff] [blame] | 2317 |  | 
| Chris Wilson | 87acb0a | 2010-10-19 10:13:00 +0100 | [diff] [blame] | 2318 | ret = i915_ring_idle(dev, &dev_priv->bsd_ring); | 
|  | 2319 | if (ret) | 
|  | 2320 | return ret; | 
| Zou Nan hai | d1b851f | 2010-05-21 09:08:57 +0800 | [diff] [blame] | 2321 |  | 
| Chris Wilson | 549f736 | 2010-10-19 11:19:32 +0100 | [diff] [blame] | 2322 | ret = i915_ring_idle(dev, &dev_priv->blt_ring); | 
|  | 2323 | if (ret) | 
|  | 2324 | return ret; | 
| Zou Nan hai | d1b851f | 2010-05-21 09:08:57 +0800 | [diff] [blame] | 2325 |  | 
| Daniel Vetter | 8a1a49f | 2010-02-11 22:29:04 +0100 | [diff] [blame] | 2326 | return 0; | 
| Daniel Vetter | 4df2faf | 2010-02-19 11:52:00 +0100 | [diff] [blame] | 2327 | } | 
|  | 2328 |  | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2329 | static void sandybridge_write_fence_reg(struct drm_gem_object *obj) | 
| Eric Anholt | 4e901fd | 2009-10-26 16:44:17 -0700 | [diff] [blame] | 2330 | { | 
| Eric Anholt | 4e901fd | 2009-10-26 16:44:17 -0700 | [diff] [blame] | 2331 | struct drm_device *dev = obj->dev; | 
|  | 2332 | drm_i915_private_t *dev_priv = dev->dev_private; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 2333 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2334 | u32 size = i915_gem_get_gtt_size(obj_priv); | 
| Eric Anholt | 4e901fd | 2009-10-26 16:44:17 -0700 | [diff] [blame] | 2335 | int regnum = obj_priv->fence_reg; | 
|  | 2336 | uint64_t val; | 
|  | 2337 |  | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2338 | val = (uint64_t)((obj_priv->gtt_offset + size - 4096) & | 
| Eric Anholt | 4e901fd | 2009-10-26 16:44:17 -0700 | [diff] [blame] | 2339 | 0xfffff000) << 32; | 
|  | 2340 | val |= obj_priv->gtt_offset & 0xfffff000; | 
|  | 2341 | val |= (uint64_t)((obj_priv->stride / 128) - 1) << | 
|  | 2342 | SANDYBRIDGE_FENCE_PITCH_SHIFT; | 
|  | 2343 |  | 
|  | 2344 | if (obj_priv->tiling_mode == I915_TILING_Y) | 
|  | 2345 | val |= 1 << I965_FENCE_TILING_Y_SHIFT; | 
|  | 2346 | val |= I965_FENCE_REG_VALID; | 
|  | 2347 |  | 
|  | 2348 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val); | 
|  | 2349 | } | 
|  | 2350 |  | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2351 | static void i965_write_fence_reg(struct drm_gem_object *obj) | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2352 | { | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2353 | struct drm_device *dev = obj->dev; | 
|  | 2354 | drm_i915_private_t *dev_priv = dev->dev_private; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 2355 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2356 | u32 size = i915_gem_get_gtt_size(obj_priv); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2357 | int regnum = obj_priv->fence_reg; | 
|  | 2358 | uint64_t val; | 
|  | 2359 |  | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2360 | val = (uint64_t)((obj_priv->gtt_offset + size - 4096) & | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2361 | 0xfffff000) << 32; | 
|  | 2362 | val |= obj_priv->gtt_offset & 0xfffff000; | 
|  | 2363 | val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT; | 
|  | 2364 | if (obj_priv->tiling_mode == I915_TILING_Y) | 
|  | 2365 | val |= 1 << I965_FENCE_TILING_Y_SHIFT; | 
|  | 2366 | val |= I965_FENCE_REG_VALID; | 
|  | 2367 |  | 
|  | 2368 | I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val); | 
|  | 2369 | } | 
|  | 2370 |  | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2371 | static void i915_write_fence_reg(struct drm_gem_object *obj) | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2372 | { | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2373 | struct drm_device *dev = obj->dev; | 
|  | 2374 | drm_i915_private_t *dev_priv = dev->dev_private; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 2375 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2376 | u32 size = i915_gem_get_gtt_size(obj_priv); | 
|  | 2377 | uint32_t fence_reg, val, pitch_val; | 
| Jesse Barnes | 0f973f2 | 2009-01-26 17:10:45 -0800 | [diff] [blame] | 2378 | int tile_width; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2379 |  | 
|  | 2380 | if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) || | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2381 | (obj_priv->gtt_offset & (size - 1))) { | 
|  | 2382 | WARN(1, "%s: object 0x%08x [fenceable? %d] not 1M or size (0x%08x) aligned [gtt_space offset=%lx, size=%lx]\n", | 
| Daniel Vetter | 75e9e91 | 2010-11-04 17:11:09 +0100 | [diff] [blame^] | 2383 | __func__, obj_priv->gtt_offset, obj_priv->map_and_fenceable, size, | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2384 | obj_priv->gtt_space->start, obj_priv->gtt_space->size); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2385 | return; | 
|  | 2386 | } | 
|  | 2387 |  | 
| Jesse Barnes | 0f973f2 | 2009-01-26 17:10:45 -0800 | [diff] [blame] | 2388 | if (obj_priv->tiling_mode == I915_TILING_Y && | 
|  | 2389 | HAS_128_BYTE_Y_TILING(dev)) | 
|  | 2390 | tile_width = 128; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2391 | else | 
| Jesse Barnes | 0f973f2 | 2009-01-26 17:10:45 -0800 | [diff] [blame] | 2392 | tile_width = 512; | 
|  | 2393 |  | 
|  | 2394 | /* Note: pitch better be a power of two tile widths */ | 
|  | 2395 | pitch_val = obj_priv->stride / tile_width; | 
|  | 2396 | pitch_val = ffs(pitch_val) - 1; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2397 |  | 
| Daniel Vetter | c36a2a6 | 2010-04-17 15:12:03 +0200 | [diff] [blame] | 2398 | if (obj_priv->tiling_mode == I915_TILING_Y && | 
|  | 2399 | HAS_128_BYTE_Y_TILING(dev)) | 
|  | 2400 | WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL); | 
|  | 2401 | else | 
|  | 2402 | WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL); | 
|  | 2403 |  | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2404 | val = obj_priv->gtt_offset; | 
|  | 2405 | if (obj_priv->tiling_mode == I915_TILING_Y) | 
|  | 2406 | val |= 1 << I830_FENCE_TILING_Y_SHIFT; | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2407 | val |= I915_FENCE_SIZE_BITS(size); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2408 | val |= pitch_val << I830_FENCE_PITCH_SHIFT; | 
|  | 2409 | val |= I830_FENCE_REG_VALID; | 
|  | 2410 |  | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2411 | fence_reg = obj_priv->fence_reg; | 
|  | 2412 | if (fence_reg < 8) | 
|  | 2413 | fence_reg = FENCE_REG_830_0 + fence_reg * 4; | 
| Eric Anholt | dc529a4 | 2009-03-10 22:34:49 -0700 | [diff] [blame] | 2414 | else | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2415 | fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4; | 
| Eric Anholt | dc529a4 | 2009-03-10 22:34:49 -0700 | [diff] [blame] | 2416 | I915_WRITE(fence_reg, val); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2417 | } | 
|  | 2418 |  | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2419 | static void i830_write_fence_reg(struct drm_gem_object *obj) | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2420 | { | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2421 | struct drm_device *dev = obj->dev; | 
|  | 2422 | drm_i915_private_t *dev_priv = dev->dev_private; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 2423 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2424 | u32 size = i915_gem_get_gtt_size(obj_priv); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2425 | int regnum = obj_priv->fence_reg; | 
|  | 2426 | uint32_t val; | 
|  | 2427 | uint32_t pitch_val; | 
| Daniel Vetter | 8d7773a | 2009-03-29 14:09:41 +0200 | [diff] [blame] | 2428 | uint32_t fence_size_bits; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2429 |  | 
| Daniel Vetter | 8d7773a | 2009-03-29 14:09:41 +0200 | [diff] [blame] | 2430 | if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) || | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2431 | (obj_priv->gtt_offset & (obj->size - 1))) { | 
| Daniel Vetter | 8d7773a | 2009-03-29 14:09:41 +0200 | [diff] [blame] | 2432 | WARN(1, "%s: object 0x%08x not 512K or size aligned\n", | 
| Jesse Barnes | 0f973f2 | 2009-01-26 17:10:45 -0800 | [diff] [blame] | 2433 | __func__, obj_priv->gtt_offset); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2434 | return; | 
|  | 2435 | } | 
|  | 2436 |  | 
| Eric Anholt | e76a16d | 2009-05-26 17:44:56 -0700 | [diff] [blame] | 2437 | pitch_val = obj_priv->stride / 128; | 
|  | 2438 | pitch_val = ffs(pitch_val) - 1; | 
|  | 2439 | WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL); | 
|  | 2440 |  | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2441 | val = obj_priv->gtt_offset; | 
|  | 2442 | if (obj_priv->tiling_mode == I915_TILING_Y) | 
|  | 2443 | val |= 1 << I830_FENCE_TILING_Y_SHIFT; | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2444 | fence_size_bits = I830_FENCE_SIZE_BITS(size); | 
| Daniel Vetter | 8d7773a | 2009-03-29 14:09:41 +0200 | [diff] [blame] | 2445 | WARN_ON(fence_size_bits & ~0x00000f00); | 
|  | 2446 | val |= fence_size_bits; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2447 | val |= pitch_val << I830_FENCE_PITCH_SHIFT; | 
|  | 2448 | val |= I830_FENCE_REG_VALID; | 
|  | 2449 |  | 
|  | 2450 | I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2451 | } | 
|  | 2452 |  | 
| Chris Wilson | 2cf34d7 | 2010-09-14 13:03:28 +0100 | [diff] [blame] | 2453 | static int i915_find_fence_reg(struct drm_device *dev, | 
|  | 2454 | bool interruptible) | 
| Daniel Vetter | ae3db24 | 2010-02-19 11:51:58 +0100 | [diff] [blame] | 2455 | { | 
| Daniel Vetter | ae3db24 | 2010-02-19 11:51:58 +0100 | [diff] [blame] | 2456 | struct drm_i915_private *dev_priv = dev->dev_private; | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2457 | struct drm_i915_fence_reg *reg; | 
|  | 2458 | struct drm_i915_gem_object *obj_priv = NULL; | 
| Daniel Vetter | ae3db24 | 2010-02-19 11:51:58 +0100 | [diff] [blame] | 2459 | int i, avail, ret; | 
|  | 2460 |  | 
|  | 2461 | /* First try to find a free reg */ | 
|  | 2462 | avail = 0; | 
|  | 2463 | for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { | 
|  | 2464 | reg = &dev_priv->fence_regs[i]; | 
|  | 2465 | if (!reg->obj) | 
|  | 2466 | return i; | 
|  | 2467 |  | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 2468 | obj_priv = to_intel_bo(reg->obj); | 
| Daniel Vetter | ae3db24 | 2010-02-19 11:51:58 +0100 | [diff] [blame] | 2469 | if (!obj_priv->pin_count) | 
|  | 2470 | avail++; | 
|  | 2471 | } | 
|  | 2472 |  | 
|  | 2473 | if (avail == 0) | 
|  | 2474 | return -ENOSPC; | 
|  | 2475 |  | 
|  | 2476 | /* None available, try to steal one or wait for a user to finish */ | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2477 | avail = I915_FENCE_REG_NONE; | 
| Daniel Vetter | 007cc8a | 2010-04-28 11:02:31 +0200 | [diff] [blame] | 2478 | list_for_each_entry(reg, &dev_priv->mm.fence_list, | 
|  | 2479 | lru_list) { | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2480 | obj_priv = to_intel_bo(reg->obj); | 
| Daniel Vetter | ae3db24 | 2010-02-19 11:51:58 +0100 | [diff] [blame] | 2481 | if (obj_priv->pin_count) | 
|  | 2482 | continue; | 
|  | 2483 |  | 
|  | 2484 | /* found one! */ | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2485 | avail = obj_priv->fence_reg; | 
| Daniel Vetter | ae3db24 | 2010-02-19 11:51:58 +0100 | [diff] [blame] | 2486 | break; | 
|  | 2487 | } | 
|  | 2488 |  | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2489 | BUG_ON(avail == I915_FENCE_REG_NONE); | 
| Daniel Vetter | ae3db24 | 2010-02-19 11:51:58 +0100 | [diff] [blame] | 2490 |  | 
|  | 2491 | /* We only have a reference on obj from the active list. put_fence_reg | 
|  | 2492 | * might drop that one, causing a use-after-free in it. So hold a | 
|  | 2493 | * private reference to obj like the other callers of put_fence_reg | 
|  | 2494 | * (set_tiling ioctl) do. */ | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2495 | drm_gem_object_reference(&obj_priv->base); | 
|  | 2496 | ret = i915_gem_object_put_fence_reg(&obj_priv->base, interruptible); | 
|  | 2497 | drm_gem_object_unreference(&obj_priv->base); | 
| Daniel Vetter | ae3db24 | 2010-02-19 11:51:58 +0100 | [diff] [blame] | 2498 | if (ret != 0) | 
|  | 2499 | return ret; | 
|  | 2500 |  | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2501 | return avail; | 
| Daniel Vetter | ae3db24 | 2010-02-19 11:51:58 +0100 | [diff] [blame] | 2502 | } | 
|  | 2503 |  | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2504 | /** | 
|  | 2505 | * i915_gem_object_get_fence_reg - set up a fence reg for an object | 
|  | 2506 | * @obj: object to map through a fence reg | 
|  | 2507 | * | 
|  | 2508 | * When mapping objects through the GTT, userspace wants to be able to write | 
|  | 2509 | * to them without having to worry about swizzling if the object is tiled. | 
|  | 2510 | * | 
|  | 2511 | * This function walks the fence regs looking for a free one for @obj, | 
|  | 2512 | * stealing one if it can't find any. | 
|  | 2513 | * | 
|  | 2514 | * It then sets up the reg based on the object's properties: address, pitch | 
|  | 2515 | * and tiling format. | 
|  | 2516 | */ | 
| Chris Wilson | 8c4b8c3 | 2009-06-17 22:08:52 +0100 | [diff] [blame] | 2517 | int | 
| Chris Wilson | 2cf34d7 | 2010-09-14 13:03:28 +0100 | [diff] [blame] | 2518 | i915_gem_object_get_fence_reg(struct drm_gem_object *obj, | 
|  | 2519 | bool interruptible) | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2520 | { | 
|  | 2521 | struct drm_device *dev = obj->dev; | 
| Jesse Barnes | 79e5394 | 2008-11-07 14:24:08 -0800 | [diff] [blame] | 2522 | struct drm_i915_private *dev_priv = dev->dev_private; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 2523 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2524 | struct drm_i915_fence_reg *reg = NULL; | 
| Daniel Vetter | ae3db24 | 2010-02-19 11:51:58 +0100 | [diff] [blame] | 2525 | int ret; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2526 |  | 
| Eric Anholt | a09ba7f | 2009-08-29 12:49:51 -0700 | [diff] [blame] | 2527 | /* Just update our place in the LRU if our fence is getting used. */ | 
|  | 2528 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { | 
| Daniel Vetter | 007cc8a | 2010-04-28 11:02:31 +0200 | [diff] [blame] | 2529 | reg = &dev_priv->fence_regs[obj_priv->fence_reg]; | 
|  | 2530 | list_move_tail(®->lru_list, &dev_priv->mm.fence_list); | 
| Eric Anholt | a09ba7f | 2009-08-29 12:49:51 -0700 | [diff] [blame] | 2531 | return 0; | 
|  | 2532 | } | 
|  | 2533 |  | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2534 | switch (obj_priv->tiling_mode) { | 
|  | 2535 | case I915_TILING_NONE: | 
|  | 2536 | WARN(1, "allocating a fence for non-tiled object?\n"); | 
|  | 2537 | break; | 
|  | 2538 | case I915_TILING_X: | 
| Jesse Barnes | 0f973f2 | 2009-01-26 17:10:45 -0800 | [diff] [blame] | 2539 | if (!obj_priv->stride) | 
|  | 2540 | return -EINVAL; | 
|  | 2541 | WARN((obj_priv->stride & (512 - 1)), | 
|  | 2542 | "object 0x%08x is X tiled but has non-512B pitch\n", | 
|  | 2543 | obj_priv->gtt_offset); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2544 | break; | 
|  | 2545 | case I915_TILING_Y: | 
| Jesse Barnes | 0f973f2 | 2009-01-26 17:10:45 -0800 | [diff] [blame] | 2546 | if (!obj_priv->stride) | 
|  | 2547 | return -EINVAL; | 
|  | 2548 | WARN((obj_priv->stride & (128 - 1)), | 
|  | 2549 | "object 0x%08x is Y tiled but has non-128B pitch\n", | 
|  | 2550 | obj_priv->gtt_offset); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2551 | break; | 
|  | 2552 | } | 
|  | 2553 |  | 
| Chris Wilson | 2cf34d7 | 2010-09-14 13:03:28 +0100 | [diff] [blame] | 2554 | ret = i915_find_fence_reg(dev, interruptible); | 
| Daniel Vetter | ae3db24 | 2010-02-19 11:51:58 +0100 | [diff] [blame] | 2555 | if (ret < 0) | 
|  | 2556 | return ret; | 
| Chris Wilson | fc7170b | 2009-02-11 14:26:46 +0000 | [diff] [blame] | 2557 |  | 
| Daniel Vetter | ae3db24 | 2010-02-19 11:51:58 +0100 | [diff] [blame] | 2558 | obj_priv->fence_reg = ret; | 
|  | 2559 | reg = &dev_priv->fence_regs[obj_priv->fence_reg]; | 
| Daniel Vetter | 007cc8a | 2010-04-28 11:02:31 +0200 | [diff] [blame] | 2560 | list_add_tail(®->lru_list, &dev_priv->mm.fence_list); | 
| Eric Anholt | a09ba7f | 2009-08-29 12:49:51 -0700 | [diff] [blame] | 2561 |  | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2562 | reg->obj = obj; | 
|  | 2563 |  | 
| Chris Wilson | e259bef | 2010-09-17 00:32:02 +0100 | [diff] [blame] | 2564 | switch (INTEL_INFO(dev)->gen) { | 
|  | 2565 | case 6: | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2566 | sandybridge_write_fence_reg(obj); | 
| Chris Wilson | e259bef | 2010-09-17 00:32:02 +0100 | [diff] [blame] | 2567 | break; | 
|  | 2568 | case 5: | 
|  | 2569 | case 4: | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2570 | i965_write_fence_reg(obj); | 
| Chris Wilson | e259bef | 2010-09-17 00:32:02 +0100 | [diff] [blame] | 2571 | break; | 
|  | 2572 | case 3: | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2573 | i915_write_fence_reg(obj); | 
| Chris Wilson | e259bef | 2010-09-17 00:32:02 +0100 | [diff] [blame] | 2574 | break; | 
|  | 2575 | case 2: | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2576 | i830_write_fence_reg(obj); | 
| Chris Wilson | e259bef | 2010-09-17 00:32:02 +0100 | [diff] [blame] | 2577 | break; | 
|  | 2578 | } | 
| Eric Anholt | d9ddcb9 | 2009-01-27 10:33:49 -0800 | [diff] [blame] | 2579 |  | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2580 | trace_i915_gem_object_get_fence(obj, | 
|  | 2581 | obj_priv->fence_reg, | 
|  | 2582 | obj_priv->tiling_mode); | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 2583 |  | 
| Eric Anholt | d9ddcb9 | 2009-01-27 10:33:49 -0800 | [diff] [blame] | 2584 | return 0; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2585 | } | 
|  | 2586 |  | 
|  | 2587 | /** | 
|  | 2588 | * i915_gem_clear_fence_reg - clear out fence register info | 
|  | 2589 | * @obj: object to clear | 
|  | 2590 | * | 
|  | 2591 | * Zeroes out the fence register itself and clears out the associated | 
|  | 2592 | * data structures in dev_priv and obj_priv. | 
|  | 2593 | */ | 
|  | 2594 | static void | 
|  | 2595 | i915_gem_clear_fence_reg(struct drm_gem_object *obj) | 
|  | 2596 | { | 
|  | 2597 | struct drm_device *dev = obj->dev; | 
| Jesse Barnes | 79e5394 | 2008-11-07 14:24:08 -0800 | [diff] [blame] | 2598 | drm_i915_private_t *dev_priv = dev->dev_private; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 2599 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Daniel Vetter | 007cc8a | 2010-04-28 11:02:31 +0200 | [diff] [blame] | 2600 | struct drm_i915_fence_reg *reg = | 
|  | 2601 | &dev_priv->fence_regs[obj_priv->fence_reg]; | 
| Chris Wilson | e259bef | 2010-09-17 00:32:02 +0100 | [diff] [blame] | 2602 | uint32_t fence_reg; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2603 |  | 
| Chris Wilson | e259bef | 2010-09-17 00:32:02 +0100 | [diff] [blame] | 2604 | switch (INTEL_INFO(dev)->gen) { | 
|  | 2605 | case 6: | 
| Eric Anholt | 4e901fd | 2009-10-26 16:44:17 -0700 | [diff] [blame] | 2606 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + | 
|  | 2607 | (obj_priv->fence_reg * 8), 0); | 
| Chris Wilson | e259bef | 2010-09-17 00:32:02 +0100 | [diff] [blame] | 2608 | break; | 
|  | 2609 | case 5: | 
|  | 2610 | case 4: | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2611 | I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); | 
| Chris Wilson | e259bef | 2010-09-17 00:32:02 +0100 | [diff] [blame] | 2612 | break; | 
|  | 2613 | case 3: | 
| Chris Wilson | 9b74f73 | 2010-09-22 19:10:44 +0100 | [diff] [blame] | 2614 | if (obj_priv->fence_reg >= 8) | 
| Chris Wilson | e259bef | 2010-09-17 00:32:02 +0100 | [diff] [blame] | 2615 | fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4; | 
| Eric Anholt | dc529a4 | 2009-03-10 22:34:49 -0700 | [diff] [blame] | 2616 | else | 
| Chris Wilson | e259bef | 2010-09-17 00:32:02 +0100 | [diff] [blame] | 2617 | case 2: | 
|  | 2618 | fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4; | 
| Eric Anholt | dc529a4 | 2009-03-10 22:34:49 -0700 | [diff] [blame] | 2619 |  | 
|  | 2620 | I915_WRITE(fence_reg, 0); | 
| Chris Wilson | e259bef | 2010-09-17 00:32:02 +0100 | [diff] [blame] | 2621 | break; | 
| Eric Anholt | dc529a4 | 2009-03-10 22:34:49 -0700 | [diff] [blame] | 2622 | } | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2623 |  | 
| Daniel Vetter | 007cc8a | 2010-04-28 11:02:31 +0200 | [diff] [blame] | 2624 | reg->obj = NULL; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2625 | obj_priv->fence_reg = I915_FENCE_REG_NONE; | 
| Daniel Vetter | 007cc8a | 2010-04-28 11:02:31 +0200 | [diff] [blame] | 2626 | list_del_init(®->lru_list); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2627 | } | 
|  | 2628 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2629 | /** | 
| Chris Wilson | 52dc7d3 | 2009-06-06 09:46:01 +0100 | [diff] [blame] | 2630 | * i915_gem_object_put_fence_reg - waits on outstanding fenced access | 
|  | 2631 | * to the buffer to finish, and then resets the fence register. | 
|  | 2632 | * @obj: tiled object holding a fence register. | 
| Chris Wilson | 2cf34d7 | 2010-09-14 13:03:28 +0100 | [diff] [blame] | 2633 | * @bool: whether the wait upon the fence is interruptible | 
| Chris Wilson | 52dc7d3 | 2009-06-06 09:46:01 +0100 | [diff] [blame] | 2634 | * | 
|  | 2635 | * Zeroes out the fence register itself and clears out the associated | 
|  | 2636 | * data structures in dev_priv and obj_priv. | 
|  | 2637 | */ | 
|  | 2638 | int | 
| Chris Wilson | 2cf34d7 | 2010-09-14 13:03:28 +0100 | [diff] [blame] | 2639 | i915_gem_object_put_fence_reg(struct drm_gem_object *obj, | 
|  | 2640 | bool interruptible) | 
| Chris Wilson | 52dc7d3 | 2009-06-06 09:46:01 +0100 | [diff] [blame] | 2641 | { | 
|  | 2642 | struct drm_device *dev = obj->dev; | 
| Chris Wilson | 53640e1 | 2010-09-20 11:40:50 +0100 | [diff] [blame] | 2643 | struct drm_i915_private *dev_priv = dev->dev_private; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 2644 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Chris Wilson | 53640e1 | 2010-09-20 11:40:50 +0100 | [diff] [blame] | 2645 | struct drm_i915_fence_reg *reg; | 
| Chris Wilson | 52dc7d3 | 2009-06-06 09:46:01 +0100 | [diff] [blame] | 2646 |  | 
|  | 2647 | if (obj_priv->fence_reg == I915_FENCE_REG_NONE) | 
|  | 2648 | return 0; | 
|  | 2649 |  | 
| Daniel Vetter | 10ae9bd | 2010-02-01 13:59:17 +0100 | [diff] [blame] | 2650 | /* If we've changed tiling, GTT-mappings of the object | 
|  | 2651 | * need to re-fault to ensure that the correct fence register | 
|  | 2652 | * setup is in place. | 
|  | 2653 | */ | 
|  | 2654 | i915_gem_release_mmap(obj); | 
|  | 2655 |  | 
| Chris Wilson | 52dc7d3 | 2009-06-06 09:46:01 +0100 | [diff] [blame] | 2656 | /* On the i915, GPU access to tiled buffers is via a fence, | 
|  | 2657 | * therefore we must wait for any outstanding access to complete | 
|  | 2658 | * before clearing the fence. | 
|  | 2659 | */ | 
| Chris Wilson | 53640e1 | 2010-09-20 11:40:50 +0100 | [diff] [blame] | 2660 | reg = &dev_priv->fence_regs[obj_priv->fence_reg]; | 
|  | 2661 | if (reg->gpu) { | 
| Chris Wilson | 52dc7d3 | 2009-06-06 09:46:01 +0100 | [diff] [blame] | 2662 | int ret; | 
|  | 2663 |  | 
| Chris Wilson | 2cf34d7 | 2010-09-14 13:03:28 +0100 | [diff] [blame] | 2664 | ret = i915_gem_object_flush_gpu_write_domain(obj, true); | 
| Chris Wilson | 0bc23aa | 2010-09-14 10:22:23 +0100 | [diff] [blame] | 2665 | if (ret) | 
| Chris Wilson | 2dafb1e | 2010-06-07 14:03:05 +0100 | [diff] [blame] | 2666 | return ret; | 
|  | 2667 |  | 
| Chris Wilson | 2cf34d7 | 2010-09-14 13:03:28 +0100 | [diff] [blame] | 2668 | ret = i915_gem_object_wait_rendering(obj, interruptible); | 
| Chris Wilson | 0bc23aa | 2010-09-14 10:22:23 +0100 | [diff] [blame] | 2669 | if (ret) | 
| Chris Wilson | 52dc7d3 | 2009-06-06 09:46:01 +0100 | [diff] [blame] | 2670 | return ret; | 
| Chris Wilson | 53640e1 | 2010-09-20 11:40:50 +0100 | [diff] [blame] | 2671 |  | 
|  | 2672 | reg->gpu = false; | 
| Chris Wilson | 52dc7d3 | 2009-06-06 09:46:01 +0100 | [diff] [blame] | 2673 | } | 
|  | 2674 |  | 
| Daniel Vetter | 4a72661 | 2010-02-01 13:59:16 +0100 | [diff] [blame] | 2675 | i915_gem_object_flush_gtt_write_domain(obj); | 
| Chris Wilson | 0bc23aa | 2010-09-14 10:22:23 +0100 | [diff] [blame] | 2676 | i915_gem_clear_fence_reg(obj); | 
| Chris Wilson | 52dc7d3 | 2009-06-06 09:46:01 +0100 | [diff] [blame] | 2677 |  | 
|  | 2678 | return 0; | 
|  | 2679 | } | 
|  | 2680 |  | 
|  | 2681 | /** | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2682 | * Finds free space in the GTT aperture and binds the object there. | 
|  | 2683 | */ | 
|  | 2684 | static int | 
| Daniel Vetter | 920afa7 | 2010-09-16 17:54:23 +0200 | [diff] [blame] | 2685 | i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, | 
|  | 2686 | unsigned alignment, | 
| Daniel Vetter | 75e9e91 | 2010-11-04 17:11:09 +0100 | [diff] [blame^] | 2687 | bool map_and_fenceable) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2688 | { | 
|  | 2689 | struct drm_device *dev = obj->dev; | 
|  | 2690 | drm_i915_private_t *dev_priv = dev->dev_private; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 2691 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2692 | struct drm_mm_node *free_space; | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2693 | gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; | 
|  | 2694 | u32 size, fence_size, fence_alignment; | 
| Daniel Vetter | 75e9e91 | 2010-11-04 17:11:09 +0100 | [diff] [blame^] | 2695 | bool mappable, fenceable; | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2696 | int ret; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2697 |  | 
| Chris Wilson | bb6baf7 | 2009-09-22 14:24:13 +0100 | [diff] [blame] | 2698 | if (obj_priv->madv != I915_MADV_WILLNEED) { | 
| Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 2699 | DRM_ERROR("Attempting to bind a purgeable object\n"); | 
|  | 2700 | return -EINVAL; | 
|  | 2701 | } | 
|  | 2702 |  | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2703 | fence_size = i915_gem_get_gtt_size(obj_priv); | 
|  | 2704 | fence_alignment = i915_gem_get_gtt_alignment(obj_priv); | 
|  | 2705 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2706 | if (alignment == 0) | 
| Daniel Vetter | 75e9e91 | 2010-11-04 17:11:09 +0100 | [diff] [blame^] | 2707 | alignment = map_and_fenceable ? fence_alignment : 4096; | 
|  | 2708 | if (map_and_fenceable && alignment & (fence_alignment - 1)) { | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2709 | DRM_ERROR("Invalid object alignment requested %u\n", alignment); | 
|  | 2710 | return -EINVAL; | 
|  | 2711 | } | 
|  | 2712 |  | 
| Daniel Vetter | 75e9e91 | 2010-11-04 17:11:09 +0100 | [diff] [blame^] | 2713 | size = map_and_fenceable ? fence_size : obj->size; | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2714 |  | 
| Chris Wilson | 654fc60 | 2010-05-27 13:18:21 +0100 | [diff] [blame] | 2715 | /* If the object is bigger than the entire aperture, reject it early | 
|  | 2716 | * before evicting everything in a vain attempt to find space. | 
|  | 2717 | */ | 
| Daniel Vetter | 920afa7 | 2010-09-16 17:54:23 +0200 | [diff] [blame] | 2718 | if (obj->size > | 
| Daniel Vetter | 75e9e91 | 2010-11-04 17:11:09 +0100 | [diff] [blame^] | 2719 | (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) { | 
| Chris Wilson | 654fc60 | 2010-05-27 13:18:21 +0100 | [diff] [blame] | 2720 | DRM_ERROR("Attempting to bind an object larger than the aperture\n"); | 
|  | 2721 | return -E2BIG; | 
|  | 2722 | } | 
|  | 2723 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2724 | search_free: | 
| Daniel Vetter | 75e9e91 | 2010-11-04 17:11:09 +0100 | [diff] [blame^] | 2725 | if (map_and_fenceable) | 
| Daniel Vetter | 920afa7 | 2010-09-16 17:54:23 +0200 | [diff] [blame] | 2726 | free_space = | 
|  | 2727 | drm_mm_search_free_in_range(&dev_priv->mm.gtt_space, | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2728 | size, alignment, 0, | 
| Daniel Vetter | 920afa7 | 2010-09-16 17:54:23 +0200 | [diff] [blame] | 2729 | dev_priv->mm.gtt_mappable_end, | 
|  | 2730 | 0); | 
|  | 2731 | else | 
|  | 2732 | free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2733 | size, alignment, 0); | 
| Daniel Vetter | 920afa7 | 2010-09-16 17:54:23 +0200 | [diff] [blame] | 2734 |  | 
|  | 2735 | if (free_space != NULL) { | 
| Daniel Vetter | 75e9e91 | 2010-11-04 17:11:09 +0100 | [diff] [blame^] | 2736 | if (map_and_fenceable) | 
| Daniel Vetter | 920afa7 | 2010-09-16 17:54:23 +0200 | [diff] [blame] | 2737 | obj_priv->gtt_space = | 
|  | 2738 | drm_mm_get_block_range_generic(free_space, | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2739 | size, alignment, 0, | 
| Daniel Vetter | 920afa7 | 2010-09-16 17:54:23 +0200 | [diff] [blame] | 2740 | dev_priv->mm.gtt_mappable_end, | 
|  | 2741 | 0); | 
|  | 2742 | else | 
|  | 2743 | obj_priv->gtt_space = | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2744 | drm_mm_get_block(free_space, size, alignment); | 
| Daniel Vetter | 920afa7 | 2010-09-16 17:54:23 +0200 | [diff] [blame] | 2745 | } | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2746 | if (obj_priv->gtt_space == NULL) { | 
|  | 2747 | /* If the gtt is empty and we're still having trouble | 
|  | 2748 | * fitting our object in, we're out of memory. | 
|  | 2749 | */ | 
| Daniel Vetter | 75e9e91 | 2010-11-04 17:11:09 +0100 | [diff] [blame^] | 2750 | ret = i915_gem_evict_something(dev, size, alignment, | 
|  | 2751 | map_and_fenceable); | 
| Chris Wilson | 9731129 | 2009-09-21 00:22:34 +0100 | [diff] [blame] | 2752 | if (ret) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2753 | return ret; | 
| Chris Wilson | 9731129 | 2009-09-21 00:22:34 +0100 | [diff] [blame] | 2754 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2755 | goto search_free; | 
|  | 2756 | } | 
|  | 2757 |  | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 2758 | ret = i915_gem_object_get_pages_gtt(obj, gfpmask); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2759 | if (ret) { | 
|  | 2760 | drm_mm_put_block(obj_priv->gtt_space); | 
|  | 2761 | obj_priv->gtt_space = NULL; | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2762 |  | 
|  | 2763 | if (ret == -ENOMEM) { | 
|  | 2764 | /* first try to clear up some space from the GTT */ | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2765 | ret = i915_gem_evict_something(dev, size, | 
| Daniel Vetter | 75e9e91 | 2010-11-04 17:11:09 +0100 | [diff] [blame^] | 2766 | alignment, | 
|  | 2767 | map_and_fenceable); | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2768 | if (ret) { | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2769 | /* now try to shrink everyone else */ | 
| Chris Wilson | 4bdadb9 | 2010-01-27 13:36:32 +0000 | [diff] [blame] | 2770 | if (gfpmask) { | 
|  | 2771 | gfpmask = 0; | 
|  | 2772 | goto search_free; | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2773 | } | 
|  | 2774 |  | 
|  | 2775 | return ret; | 
|  | 2776 | } | 
|  | 2777 |  | 
|  | 2778 | goto search_free; | 
|  | 2779 | } | 
|  | 2780 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2781 | return ret; | 
|  | 2782 | } | 
|  | 2783 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2784 | /* Create an AGP memory structure pointing at our pages, and bind it | 
|  | 2785 | * into the GTT. | 
|  | 2786 | */ | 
|  | 2787 | obj_priv->agp_mem = drm_agp_bind_pages(dev, | 
| Eric Anholt | 856fa19 | 2009-03-19 14:10:50 -0700 | [diff] [blame] | 2788 | obj_priv->pages, | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2789 | obj->size >> PAGE_SHIFT, | 
| Chris Wilson | 9af90d1 | 2010-10-17 10:01:56 +0100 | [diff] [blame] | 2790 | obj_priv->gtt_space->start, | 
| Keith Packard | ba1eb1d | 2008-10-14 19:55:10 -0700 | [diff] [blame] | 2791 | obj_priv->agp_type); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2792 | if (obj_priv->agp_mem == NULL) { | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 2793 | i915_gem_object_put_pages_gtt(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2794 | drm_mm_put_block(obj_priv->gtt_space); | 
|  | 2795 | obj_priv->gtt_space = NULL; | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2796 |  | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2797 | ret = i915_gem_evict_something(dev, size, | 
| Daniel Vetter | 75e9e91 | 2010-11-04 17:11:09 +0100 | [diff] [blame^] | 2798 | alignment, map_and_fenceable); | 
| Chris Wilson | 9731129 | 2009-09-21 00:22:34 +0100 | [diff] [blame] | 2799 | if (ret) | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2800 | return ret; | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2801 |  | 
|  | 2802 | goto search_free; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2803 | } | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2804 |  | 
| Daniel Vetter | fb7d516 | 2010-10-01 22:05:20 +0200 | [diff] [blame] | 2805 | obj_priv->gtt_offset = obj_priv->gtt_space->start; | 
|  | 2806 |  | 
| Chris Wilson | bf1a109 | 2010-08-07 11:01:20 +0100 | [diff] [blame] | 2807 | /* keep track of bounds object by adding it to the inactive list */ | 
| Chris Wilson | 69dc498 | 2010-10-19 10:36:51 +0100 | [diff] [blame] | 2808 | list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2809 | i915_gem_info_add_gtt(dev_priv, obj_priv); | 
| Chris Wilson | bf1a109 | 2010-08-07 11:01:20 +0100 | [diff] [blame] | 2810 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2811 | /* Assert that the object is not currently in any GPU domain. As it | 
|  | 2812 | * wasn't in the GTT, there shouldn't be any way it could have been in | 
|  | 2813 | * a GPU cache | 
|  | 2814 | */ | 
| Chris Wilson | 21d509e | 2009-06-06 09:46:02 +0100 | [diff] [blame] | 2815 | BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); | 
|  | 2816 | BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2817 |  | 
| Daniel Vetter | 75e9e91 | 2010-11-04 17:11:09 +0100 | [diff] [blame^] | 2818 | trace_i915_gem_object_bind(obj, obj_priv->gtt_offset, map_and_fenceable); | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 2819 |  | 
| Daniel Vetter | 75e9e91 | 2010-11-04 17:11:09 +0100 | [diff] [blame^] | 2820 | fenceable = | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2821 | obj_priv->gtt_space->size == fence_size && | 
|  | 2822 | (obj_priv->gtt_space->start & (fence_alignment -1)) == 0; | 
|  | 2823 |  | 
| Daniel Vetter | 75e9e91 | 2010-11-04 17:11:09 +0100 | [diff] [blame^] | 2824 | mappable = | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 2825 | obj_priv->gtt_offset + obj->size <= dev_priv->mm.gtt_mappable_end; | 
|  | 2826 |  | 
| Daniel Vetter | 75e9e91 | 2010-11-04 17:11:09 +0100 | [diff] [blame^] | 2827 | obj_priv->map_and_fenceable = mappable && fenceable; | 
|  | 2828 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2829 | return 0; | 
|  | 2830 | } | 
|  | 2831 |  | 
|  | 2832 | void | 
|  | 2833 | i915_gem_clflush_object(struct drm_gem_object *obj) | 
|  | 2834 | { | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 2835 | struct drm_i915_gem_object	*obj_priv = to_intel_bo(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2836 |  | 
|  | 2837 | /* If we don't have a page list set up, then we're not pinned | 
|  | 2838 | * to GPU, and we can ignore the cache flush because it'll happen | 
|  | 2839 | * again at bind time. | 
|  | 2840 | */ | 
| Eric Anholt | 856fa19 | 2009-03-19 14:10:50 -0700 | [diff] [blame] | 2841 | if (obj_priv->pages == NULL) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2842 | return; | 
|  | 2843 |  | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 2844 | trace_i915_gem_object_clflush(obj); | 
| Eric Anholt | cfa16a0 | 2009-05-26 18:46:16 -0700 | [diff] [blame] | 2845 |  | 
| Eric Anholt | 856fa19 | 2009-03-19 14:10:50 -0700 | [diff] [blame] | 2846 | drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2847 | } | 
|  | 2848 |  | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2849 | /** Flushes any GPU write domain for the object if it's dirty. */ | 
| Chris Wilson | 2dafb1e | 2010-06-07 14:03:05 +0100 | [diff] [blame] | 2850 | static int | 
| Daniel Vetter | ba3d8d7 | 2010-02-11 22:37:04 +0100 | [diff] [blame] | 2851 | i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, | 
|  | 2852 | bool pipelined) | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2853 | { | 
|  | 2854 | struct drm_device *dev = obj->dev; | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2855 |  | 
|  | 2856 | if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) | 
| Chris Wilson | 2dafb1e | 2010-06-07 14:03:05 +0100 | [diff] [blame] | 2857 | return 0; | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2858 |  | 
|  | 2859 | /* Queue the GPU write cache flushing we need. */ | 
| Chris Wilson | c78ec30 | 2010-09-20 12:50:23 +0100 | [diff] [blame] | 2860 | i915_gem_flush_ring(dev, NULL, | 
| Chris Wilson | 9220434 | 2010-09-18 11:02:01 +0100 | [diff] [blame] | 2861 | to_intel_bo(obj)->ring, | 
|  | 2862 | 0, obj->write_domain); | 
| Chris Wilson | 48b956c | 2010-09-14 12:50:34 +0100 | [diff] [blame] | 2863 | BUG_ON(obj->write_domain); | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 2864 |  | 
| Daniel Vetter | ba3d8d7 | 2010-02-11 22:37:04 +0100 | [diff] [blame] | 2865 | if (pipelined) | 
|  | 2866 | return 0; | 
|  | 2867 |  | 
| Chris Wilson | 2cf34d7 | 2010-09-14 13:03:28 +0100 | [diff] [blame] | 2868 | return i915_gem_object_wait_rendering(obj, true); | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2869 | } | 
|  | 2870 |  | 
|  | 2871 | /** Flushes the GTT write domain for the object if it's dirty. */ | 
|  | 2872 | static void | 
|  | 2873 | i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj) | 
|  | 2874 | { | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 2875 | uint32_t old_write_domain; | 
|  | 2876 |  | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2877 | if (obj->write_domain != I915_GEM_DOMAIN_GTT) | 
|  | 2878 | return; | 
|  | 2879 |  | 
|  | 2880 | /* No actual flushing is required for the GTT write domain.   Writes | 
|  | 2881 | * to it immediately go to main memory as far as we know, so there's | 
|  | 2882 | * no chipset flush.  It also doesn't land in render cache. | 
|  | 2883 | */ | 
| Chris Wilson | 4a684a4 | 2010-10-28 14:44:08 +0100 | [diff] [blame] | 2884 | i915_gem_release_mmap(obj); | 
|  | 2885 |  | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 2886 | old_write_domain = obj->write_domain; | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2887 | obj->write_domain = 0; | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 2888 |  | 
|  | 2889 | trace_i915_gem_object_change_domain(obj, | 
|  | 2890 | obj->read_domains, | 
|  | 2891 | old_write_domain); | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2892 | } | 
|  | 2893 |  | 
|  | 2894 | /** Flushes the CPU write domain for the object if it's dirty. */ | 
|  | 2895 | static void | 
|  | 2896 | i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) | 
|  | 2897 | { | 
|  | 2898 | struct drm_device *dev = obj->dev; | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 2899 | uint32_t old_write_domain; | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2900 |  | 
|  | 2901 | if (obj->write_domain != I915_GEM_DOMAIN_CPU) | 
|  | 2902 | return; | 
|  | 2903 |  | 
|  | 2904 | i915_gem_clflush_object(obj); | 
|  | 2905 | drm_agp_chipset_flush(dev); | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 2906 | old_write_domain = obj->write_domain; | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2907 | obj->write_domain = 0; | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 2908 |  | 
|  | 2909 | trace_i915_gem_object_change_domain(obj, | 
|  | 2910 | obj->read_domains, | 
|  | 2911 | old_write_domain); | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2912 | } | 
|  | 2913 |  | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 2914 | /** | 
|  | 2915 | * Moves a single object to the GTT read, and possibly write domain. | 
|  | 2916 | * | 
|  | 2917 | * This function returns when the move is complete, including waiting on | 
|  | 2918 | * flushes to occur. | 
|  | 2919 | */ | 
| Jesse Barnes | 79e5394 | 2008-11-07 14:24:08 -0800 | [diff] [blame] | 2920 | int | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 2921 | i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) | 
|  | 2922 | { | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 2923 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 2924 | uint32_t old_write_domain, old_read_domains; | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2925 | int ret; | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 2926 |  | 
| Eric Anholt | 0235439 | 2008-11-26 13:58:13 -0800 | [diff] [blame] | 2927 | /* Not valid to be called on unbound objects. */ | 
|  | 2928 | if (obj_priv->gtt_space == NULL) | 
|  | 2929 | return -EINVAL; | 
|  | 2930 |  | 
| Daniel Vetter | ba3d8d7 | 2010-02-11 22:37:04 +0100 | [diff] [blame] | 2931 | ret = i915_gem_object_flush_gpu_write_domain(obj, false); | 
| Chris Wilson | 2dafb1e | 2010-06-07 14:03:05 +0100 | [diff] [blame] | 2932 | if (ret != 0) | 
|  | 2933 | return ret; | 
|  | 2934 |  | 
| Chris Wilson | 7213342 | 2010-09-13 23:56:38 +0100 | [diff] [blame] | 2935 | i915_gem_object_flush_cpu_write_domain(obj); | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 2936 |  | 
| Daniel Vetter | ba3d8d7 | 2010-02-11 22:37:04 +0100 | [diff] [blame] | 2937 | if (write) { | 
| Chris Wilson | 2cf34d7 | 2010-09-14 13:03:28 +0100 | [diff] [blame] | 2938 | ret = i915_gem_object_wait_rendering(obj, true); | 
| Daniel Vetter | ba3d8d7 | 2010-02-11 22:37:04 +0100 | [diff] [blame] | 2939 | if (ret) | 
|  | 2940 | return ret; | 
| Daniel Vetter | ba3d8d7 | 2010-02-11 22:37:04 +0100 | [diff] [blame] | 2941 | } | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2942 |  | 
|  | 2943 | old_write_domain = obj->write_domain; | 
|  | 2944 | old_read_domains = obj->read_domains; | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 2945 |  | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 2946 | /* It should now be out of any other write domains, and we can update | 
|  | 2947 | * the domain values for our changes. | 
|  | 2948 | */ | 
|  | 2949 | BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); | 
|  | 2950 | obj->read_domains |= I915_GEM_DOMAIN_GTT; | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2951 | if (write) { | 
| Chris Wilson | 7213342 | 2010-09-13 23:56:38 +0100 | [diff] [blame] | 2952 | obj->read_domains = I915_GEM_DOMAIN_GTT; | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 2953 | obj->write_domain = I915_GEM_DOMAIN_GTT; | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2954 | obj_priv->dirty = 1; | 
|  | 2955 | } | 
|  | 2956 |  | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 2957 | trace_i915_gem_object_change_domain(obj, | 
|  | 2958 | old_read_domains, | 
|  | 2959 | old_write_domain); | 
|  | 2960 |  | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2961 | return 0; | 
|  | 2962 | } | 
|  | 2963 |  | 
| Zhenyu Wang | b9241ea | 2009-11-25 13:09:39 +0800 | [diff] [blame] | 2964 | /* | 
|  | 2965 | * Prepare buffer for display plane. Use uninterruptible for possible flush | 
|  | 2966 | * wait, as in modesetting process we're not supposed to be interrupted. | 
|  | 2967 | */ | 
|  | 2968 | int | 
| Chris Wilson | 48b956c | 2010-09-14 12:50:34 +0100 | [diff] [blame] | 2969 | i915_gem_object_set_to_display_plane(struct drm_gem_object *obj, | 
|  | 2970 | bool pipelined) | 
| Zhenyu Wang | b9241ea | 2009-11-25 13:09:39 +0800 | [diff] [blame] | 2971 | { | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 2972 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Daniel Vetter | ba3d8d7 | 2010-02-11 22:37:04 +0100 | [diff] [blame] | 2973 | uint32_t old_read_domains; | 
| Zhenyu Wang | b9241ea | 2009-11-25 13:09:39 +0800 | [diff] [blame] | 2974 | int ret; | 
|  | 2975 |  | 
|  | 2976 | /* Not valid to be called on unbound objects. */ | 
|  | 2977 | if (obj_priv->gtt_space == NULL) | 
|  | 2978 | return -EINVAL; | 
|  | 2979 |  | 
| Chris Wilson | ced270f | 2010-09-26 22:47:46 +0100 | [diff] [blame] | 2980 | ret = i915_gem_object_flush_gpu_write_domain(obj, true); | 
| Chris Wilson | 2dafb1e | 2010-06-07 14:03:05 +0100 | [diff] [blame] | 2981 | if (ret) | 
|  | 2982 | return ret; | 
| Zhenyu Wang | b9241ea | 2009-11-25 13:09:39 +0800 | [diff] [blame] | 2983 |  | 
| Chris Wilson | ced270f | 2010-09-26 22:47:46 +0100 | [diff] [blame] | 2984 | /* Currently, we are always called from an non-interruptible context. */ | 
|  | 2985 | if (!pipelined) { | 
|  | 2986 | ret = i915_gem_object_wait_rendering(obj, false); | 
|  | 2987 | if (ret) | 
| Zhenyu Wang | b9241ea | 2009-11-25 13:09:39 +0800 | [diff] [blame] | 2988 | return ret; | 
|  | 2989 | } | 
|  | 2990 |  | 
| Chris Wilson | b118c1e | 2010-05-27 13:18:14 +0100 | [diff] [blame] | 2991 | i915_gem_object_flush_cpu_write_domain(obj); | 
|  | 2992 |  | 
| Zhenyu Wang | b9241ea | 2009-11-25 13:09:39 +0800 | [diff] [blame] | 2993 | old_read_domains = obj->read_domains; | 
| Chris Wilson | c78ec30 | 2010-09-20 12:50:23 +0100 | [diff] [blame] | 2994 | obj->read_domains |= I915_GEM_DOMAIN_GTT; | 
| Zhenyu Wang | b9241ea | 2009-11-25 13:09:39 +0800 | [diff] [blame] | 2995 |  | 
|  | 2996 | trace_i915_gem_object_change_domain(obj, | 
|  | 2997 | old_read_domains, | 
| Daniel Vetter | ba3d8d7 | 2010-02-11 22:37:04 +0100 | [diff] [blame] | 2998 | obj->write_domain); | 
| Zhenyu Wang | b9241ea | 2009-11-25 13:09:39 +0800 | [diff] [blame] | 2999 |  | 
|  | 3000 | return 0; | 
|  | 3001 | } | 
|  | 3002 |  | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3003 | /** | 
|  | 3004 | * Moves a single object to the CPU read, and possibly write domain. | 
|  | 3005 | * | 
|  | 3006 | * This function returns when the move is complete, including waiting on | 
|  | 3007 | * flushes to occur. | 
|  | 3008 | */ | 
|  | 3009 | static int | 
|  | 3010 | i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) | 
|  | 3011 | { | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 3012 | uint32_t old_write_domain, old_read_domains; | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3013 | int ret; | 
|  | 3014 |  | 
| Daniel Vetter | ba3d8d7 | 2010-02-11 22:37:04 +0100 | [diff] [blame] | 3015 | ret = i915_gem_object_flush_gpu_write_domain(obj, false); | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3016 | if (ret != 0) | 
|  | 3017 | return ret; | 
|  | 3018 |  | 
|  | 3019 | i915_gem_object_flush_gtt_write_domain(obj); | 
|  | 3020 |  | 
|  | 3021 | /* If we have a partially-valid cache of the object in the CPU, | 
|  | 3022 | * finish invalidating it and free the per-page flags. | 
|  | 3023 | */ | 
|  | 3024 | i915_gem_object_set_to_full_cpu_read_domain(obj); | 
|  | 3025 |  | 
| Chris Wilson | 7213342 | 2010-09-13 23:56:38 +0100 | [diff] [blame] | 3026 | if (write) { | 
| Chris Wilson | 2cf34d7 | 2010-09-14 13:03:28 +0100 | [diff] [blame] | 3027 | ret = i915_gem_object_wait_rendering(obj, true); | 
| Chris Wilson | 7213342 | 2010-09-13 23:56:38 +0100 | [diff] [blame] | 3028 | if (ret) | 
|  | 3029 | return ret; | 
|  | 3030 | } | 
|  | 3031 |  | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 3032 | old_write_domain = obj->write_domain; | 
|  | 3033 | old_read_domains = obj->read_domains; | 
|  | 3034 |  | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3035 | /* Flush the CPU cache if it's still invalid. */ | 
|  | 3036 | if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { | 
|  | 3037 | i915_gem_clflush_object(obj); | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3038 |  | 
|  | 3039 | obj->read_domains |= I915_GEM_DOMAIN_CPU; | 
|  | 3040 | } | 
|  | 3041 |  | 
|  | 3042 | /* It should now be out of any other write domains, and we can update | 
|  | 3043 | * the domain values for our changes. | 
|  | 3044 | */ | 
|  | 3045 | BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0); | 
|  | 3046 |  | 
|  | 3047 | /* If we're writing through the CPU, then the GPU read domains will | 
|  | 3048 | * need to be invalidated at next use. | 
|  | 3049 | */ | 
|  | 3050 | if (write) { | 
| Chris Wilson | c78ec30 | 2010-09-20 12:50:23 +0100 | [diff] [blame] | 3051 | obj->read_domains = I915_GEM_DOMAIN_CPU; | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3052 | obj->write_domain = I915_GEM_DOMAIN_CPU; | 
|  | 3053 | } | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 3054 |  | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 3055 | trace_i915_gem_object_change_domain(obj, | 
|  | 3056 | old_read_domains, | 
|  | 3057 | old_write_domain); | 
|  | 3058 |  | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 3059 | return 0; | 
|  | 3060 | } | 
|  | 3061 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3062 | /* | 
|  | 3063 | * Set the next domain for the specified object. This | 
|  | 3064 | * may not actually perform the necessary flushing/invaliding though, | 
|  | 3065 | * as that may want to be batched with other set_domain operations | 
|  | 3066 | * | 
|  | 3067 | * This is (we hope) the only really tricky part of gem. The goal | 
|  | 3068 | * is fairly simple -- track which caches hold bits of the object | 
|  | 3069 | * and make sure they remain coherent. A few concrete examples may | 
|  | 3070 | * help to explain how it works. For shorthand, we use the notation | 
|  | 3071 | * (read_domains, write_domain), e.g. (CPU, CPU) to indicate the | 
|  | 3072 | * a pair of read and write domain masks. | 
|  | 3073 | * | 
|  | 3074 | * Case 1: the batch buffer | 
|  | 3075 | * | 
|  | 3076 | *	1. Allocated | 
|  | 3077 | *	2. Written by CPU | 
|  | 3078 | *	3. Mapped to GTT | 
|  | 3079 | *	4. Read by GPU | 
|  | 3080 | *	5. Unmapped from GTT | 
|  | 3081 | *	6. Freed | 
|  | 3082 | * | 
|  | 3083 | *	Let's take these a step at a time | 
|  | 3084 | * | 
|  | 3085 | *	1. Allocated | 
|  | 3086 | *		Pages allocated from the kernel may still have | 
|  | 3087 | *		cache contents, so we set them to (CPU, CPU) always. | 
|  | 3088 | *	2. Written by CPU (using pwrite) | 
|  | 3089 | *		The pwrite function calls set_domain (CPU, CPU) and | 
|  | 3090 | *		this function does nothing (as nothing changes) | 
|  | 3091 | *	3. Mapped by GTT | 
|  | 3092 | *		This function asserts that the object is not | 
|  | 3093 | *		currently in any GPU-based read or write domains | 
|  | 3094 | *	4. Read by GPU | 
|  | 3095 | *		i915_gem_execbuffer calls set_domain (COMMAND, 0). | 
|  | 3096 | *		As write_domain is zero, this function adds in the | 
|  | 3097 | *		current read domains (CPU+COMMAND, 0). | 
|  | 3098 | *		flush_domains is set to CPU. | 
|  | 3099 | *		invalidate_domains is set to COMMAND | 
|  | 3100 | *		clflush is run to get data out of the CPU caches | 
|  | 3101 | *		then i915_dev_set_domain calls i915_gem_flush to | 
|  | 3102 | *		emit an MI_FLUSH and drm_agp_chipset_flush | 
|  | 3103 | *	5. Unmapped from GTT | 
|  | 3104 | *		i915_gem_object_unbind calls set_domain (CPU, CPU) | 
|  | 3105 | *		flush_domains and invalidate_domains end up both zero | 
|  | 3106 | *		so no flushing/invalidating happens | 
|  | 3107 | *	6. Freed | 
|  | 3108 | *		yay, done | 
|  | 3109 | * | 
|  | 3110 | * Case 2: The shared render buffer | 
|  | 3111 | * | 
|  | 3112 | *	1. Allocated | 
|  | 3113 | *	2. Mapped to GTT | 
|  | 3114 | *	3. Read/written by GPU | 
|  | 3115 | *	4. set_domain to (CPU,CPU) | 
|  | 3116 | *	5. Read/written by CPU | 
|  | 3117 | *	6. Read/written by GPU | 
|  | 3118 | * | 
|  | 3119 | *	1. Allocated | 
|  | 3120 | *		Same as last example, (CPU, CPU) | 
|  | 3121 | *	2. Mapped to GTT | 
|  | 3122 | *		Nothing changes (assertions find that it is not in the GPU) | 
|  | 3123 | *	3. Read/written by GPU | 
|  | 3124 | *		execbuffer calls set_domain (RENDER, RENDER) | 
|  | 3125 | *		flush_domains gets CPU | 
|  | 3126 | *		invalidate_domains gets GPU | 
|  | 3127 | *		clflush (obj) | 
|  | 3128 | *		MI_FLUSH and drm_agp_chipset_flush | 
|  | 3129 | *	4. set_domain (CPU, CPU) | 
|  | 3130 | *		flush_domains gets GPU | 
|  | 3131 | *		invalidate_domains gets CPU | 
|  | 3132 | *		wait_rendering (obj) to make sure all drawing is complete. | 
|  | 3133 | *		This will include an MI_FLUSH to get the data from GPU | 
|  | 3134 | *		to memory | 
|  | 3135 | *		clflush (obj) to invalidate the CPU cache | 
|  | 3136 | *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?) | 
|  | 3137 | *	5. Read/written by CPU | 
|  | 3138 | *		cache lines are loaded and dirtied | 
|  | 3139 | *	6. Read written by GPU | 
|  | 3140 | *		Same as last GPU access | 
|  | 3141 | * | 
|  | 3142 | * Case 3: The constant buffer | 
|  | 3143 | * | 
|  | 3144 | *	1. Allocated | 
|  | 3145 | *	2. Written by CPU | 
|  | 3146 | *	3. Read by GPU | 
|  | 3147 | *	4. Updated (written) by CPU again | 
|  | 3148 | *	5. Read by GPU | 
|  | 3149 | * | 
|  | 3150 | *	1. Allocated | 
|  | 3151 | *		(CPU, CPU) | 
|  | 3152 | *	2. Written by CPU | 
|  | 3153 | *		(CPU, CPU) | 
|  | 3154 | *	3. Read by GPU | 
|  | 3155 | *		(CPU+RENDER, 0) | 
|  | 3156 | *		flush_domains = CPU | 
|  | 3157 | *		invalidate_domains = RENDER | 
|  | 3158 | *		clflush (obj) | 
|  | 3159 | *		MI_FLUSH | 
|  | 3160 | *		drm_agp_chipset_flush | 
|  | 3161 | *	4. Updated (written) by CPU again | 
|  | 3162 | *		(CPU, CPU) | 
|  | 3163 | *		flush_domains = 0 (no previous write domain) | 
|  | 3164 | *		invalidate_domains = 0 (no new read domains) | 
|  | 3165 | *	5. Read by GPU | 
|  | 3166 | *		(CPU+RENDER, 0) | 
|  | 3167 | *		flush_domains = CPU | 
|  | 3168 | *		invalidate_domains = RENDER | 
|  | 3169 | *		clflush (obj) | 
|  | 3170 | *		MI_FLUSH | 
|  | 3171 | *		drm_agp_chipset_flush | 
|  | 3172 | */ | 
| Keith Packard | c0d9082 | 2008-11-20 23:11:08 -0800 | [diff] [blame] | 3173 | static void | 
| Chris Wilson | b665145 | 2010-10-23 10:15:06 +0100 | [diff] [blame] | 3174 | i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, | 
| Chris Wilson | 0f8c6d7 | 2010-11-01 12:38:44 +0000 | [diff] [blame] | 3175 | struct intel_ring_buffer *ring, | 
|  | 3176 | struct change_domains *cd) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3177 | { | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 3178 | struct drm_i915_gem_object	*obj_priv = to_intel_bo(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3179 | uint32_t			invalidate_domains = 0; | 
|  | 3180 | uint32_t			flush_domains = 0; | 
| Jesse Barnes | 652c393 | 2009-08-17 13:31:43 -0700 | [diff] [blame] | 3181 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3182 | /* | 
|  | 3183 | * If the object isn't moving to a new write domain, | 
|  | 3184 | * let the object stay in multiple read domains | 
|  | 3185 | */ | 
| Eric Anholt | 8b0e378 | 2009-02-19 14:40:50 -0800 | [diff] [blame] | 3186 | if (obj->pending_write_domain == 0) | 
|  | 3187 | obj->pending_read_domains |= obj->read_domains; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3188 |  | 
|  | 3189 | /* | 
|  | 3190 | * Flush the current write domain if | 
|  | 3191 | * the new read domains don't match. Invalidate | 
|  | 3192 | * any read domains which differ from the old | 
|  | 3193 | * write domain | 
|  | 3194 | */ | 
| Eric Anholt | 8b0e378 | 2009-02-19 14:40:50 -0800 | [diff] [blame] | 3195 | if (obj->write_domain && | 
| Chris Wilson | 13b2928 | 2010-11-01 12:22:48 +0000 | [diff] [blame] | 3196 | (obj->write_domain != obj->pending_read_domains || | 
|  | 3197 | obj_priv->ring != ring)) { | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3198 | flush_domains |= obj->write_domain; | 
| Eric Anholt | 8b0e378 | 2009-02-19 14:40:50 -0800 | [diff] [blame] | 3199 | invalidate_domains |= | 
|  | 3200 | obj->pending_read_domains & ~obj->write_domain; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3201 | } | 
|  | 3202 | /* | 
|  | 3203 | * Invalidate any read caches which may have | 
|  | 3204 | * stale data. That is, any new read domains. | 
|  | 3205 | */ | 
| Eric Anholt | 8b0e378 | 2009-02-19 14:40:50 -0800 | [diff] [blame] | 3206 | invalidate_domains |= obj->pending_read_domains & ~obj->read_domains; | 
| Chris Wilson | 3d2a812 | 2010-09-29 11:39:53 +0100 | [diff] [blame] | 3207 | if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3208 | i915_gem_clflush_object(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3209 |  | 
| Chris Wilson | 4a684a4 | 2010-10-28 14:44:08 +0100 | [diff] [blame] | 3210 | /* blow away mappings if mapped through GTT */ | 
|  | 3211 | if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT) | 
|  | 3212 | i915_gem_release_mmap(obj); | 
|  | 3213 |  | 
| Eric Anholt | efbeed9 | 2009-02-19 14:54:51 -0800 | [diff] [blame] | 3214 | /* The actual obj->write_domain will be updated with | 
|  | 3215 | * pending_write_domain after we emit the accumulated flush for all | 
|  | 3216 | * of our domain changes in execbuffers (which clears objects' | 
|  | 3217 | * write_domains).  So if we have a current write domain that we | 
|  | 3218 | * aren't changing, set pending_write_domain to that. | 
|  | 3219 | */ | 
|  | 3220 | if (flush_domains == 0 && obj->pending_write_domain == 0) | 
|  | 3221 | obj->pending_write_domain = obj->write_domain; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3222 |  | 
| Chris Wilson | 0f8c6d7 | 2010-11-01 12:38:44 +0000 | [diff] [blame] | 3223 | cd->invalidate_domains |= invalidate_domains; | 
|  | 3224 | cd->flush_domains |= flush_domains; | 
| Chris Wilson | b665145 | 2010-10-23 10:15:06 +0100 | [diff] [blame] | 3225 | if (flush_domains & I915_GEM_GPU_DOMAINS) | 
| Chris Wilson | 0f8c6d7 | 2010-11-01 12:38:44 +0000 | [diff] [blame] | 3226 | cd->flush_rings |= obj_priv->ring->id; | 
| Chris Wilson | b665145 | 2010-10-23 10:15:06 +0100 | [diff] [blame] | 3227 | if (invalidate_domains & I915_GEM_GPU_DOMAINS) | 
| Chris Wilson | 0f8c6d7 | 2010-11-01 12:38:44 +0000 | [diff] [blame] | 3228 | cd->flush_rings |= ring->id; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3229 | } | 
|  | 3230 |  | 
|  | 3231 | /** | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3232 | * Moves the object from a partially CPU read to a full one. | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3233 | * | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3234 | * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(), | 
|  | 3235 | * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU). | 
|  | 3236 | */ | 
|  | 3237 | static void | 
|  | 3238 | i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) | 
|  | 3239 | { | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 3240 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3241 |  | 
|  | 3242 | if (!obj_priv->page_cpu_valid) | 
|  | 3243 | return; | 
|  | 3244 |  | 
|  | 3245 | /* If we're partially in the CPU read domain, finish moving it in. | 
|  | 3246 | */ | 
|  | 3247 | if (obj->read_domains & I915_GEM_DOMAIN_CPU) { | 
|  | 3248 | int i; | 
|  | 3249 |  | 
|  | 3250 | for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) { | 
|  | 3251 | if (obj_priv->page_cpu_valid[i]) | 
|  | 3252 | continue; | 
| Eric Anholt | 856fa19 | 2009-03-19 14:10:50 -0700 | [diff] [blame] | 3253 | drm_clflush_pages(obj_priv->pages + i, 1); | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3254 | } | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3255 | } | 
|  | 3256 |  | 
|  | 3257 | /* Free the page_cpu_valid mappings which are now stale, whether | 
|  | 3258 | * or not we've got I915_GEM_DOMAIN_CPU. | 
|  | 3259 | */ | 
| Eric Anholt | 9a298b2 | 2009-03-24 12:23:04 -0700 | [diff] [blame] | 3260 | kfree(obj_priv->page_cpu_valid); | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3261 | obj_priv->page_cpu_valid = NULL; | 
|  | 3262 | } | 
|  | 3263 |  | 
|  | 3264 | /** | 
|  | 3265 | * Set the CPU read domain on a range of the object. | 
|  | 3266 | * | 
|  | 3267 | * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's | 
|  | 3268 | * not entirely valid.  The page_cpu_valid member of the object flags which | 
|  | 3269 | * pages have been flushed, and will be respected by | 
|  | 3270 | * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping | 
|  | 3271 | * of the whole object. | 
|  | 3272 | * | 
|  | 3273 | * This function returns when the move is complete, including waiting on | 
|  | 3274 | * flushes to occur. | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3275 | */ | 
|  | 3276 | static int | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3277 | i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, | 
|  | 3278 | uint64_t offset, uint64_t size) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3279 | { | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 3280 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 3281 | uint32_t old_read_domains; | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3282 | int i, ret; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3283 |  | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3284 | if (offset == 0 && size == obj->size) | 
|  | 3285 | return i915_gem_object_set_to_cpu_domain(obj, 0); | 
|  | 3286 |  | 
| Daniel Vetter | ba3d8d7 | 2010-02-11 22:37:04 +0100 | [diff] [blame] | 3287 | ret = i915_gem_object_flush_gpu_write_domain(obj, false); | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3288 | if (ret != 0) | 
|  | 3289 | return ret; | 
|  | 3290 | i915_gem_object_flush_gtt_write_domain(obj); | 
|  | 3291 |  | 
|  | 3292 | /* If we're already fully in the CPU read domain, we're done. */ | 
|  | 3293 | if (obj_priv->page_cpu_valid == NULL && | 
|  | 3294 | (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3295 | return 0; | 
|  | 3296 |  | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3297 | /* Otherwise, create/clear the per-page CPU read domain flag if we're | 
|  | 3298 | * newly adding I915_GEM_DOMAIN_CPU | 
|  | 3299 | */ | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3300 | if (obj_priv->page_cpu_valid == NULL) { | 
| Eric Anholt | 9a298b2 | 2009-03-24 12:23:04 -0700 | [diff] [blame] | 3301 | obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE, | 
|  | 3302 | GFP_KERNEL); | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3303 | if (obj_priv->page_cpu_valid == NULL) | 
|  | 3304 | return -ENOMEM; | 
|  | 3305 | } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) | 
|  | 3306 | memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3307 |  | 
|  | 3308 | /* Flush the cache on any pages that are still invalid from the CPU's | 
|  | 3309 | * perspective. | 
|  | 3310 | */ | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3311 | for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; | 
|  | 3312 | i++) { | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3313 | if (obj_priv->page_cpu_valid[i]) | 
|  | 3314 | continue; | 
|  | 3315 |  | 
| Eric Anholt | 856fa19 | 2009-03-19 14:10:50 -0700 | [diff] [blame] | 3316 | drm_clflush_pages(obj_priv->pages + i, 1); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3317 |  | 
|  | 3318 | obj_priv->page_cpu_valid[i] = 1; | 
|  | 3319 | } | 
|  | 3320 |  | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3321 | /* It should now be out of any other write domains, and we can update | 
|  | 3322 | * the domain values for our changes. | 
|  | 3323 | */ | 
|  | 3324 | BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0); | 
|  | 3325 |  | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 3326 | old_read_domains = obj->read_domains; | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3327 | obj->read_domains |= I915_GEM_DOMAIN_CPU; | 
|  | 3328 |  | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 3329 | trace_i915_gem_object_change_domain(obj, | 
|  | 3330 | old_read_domains, | 
|  | 3331 | obj->write_domain); | 
|  | 3332 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3333 | return 0; | 
|  | 3334 | } | 
|  | 3335 |  | 
|  | 3336 | /** | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3337 | * Pin an object to the GTT and evaluate the relocations landing in it. | 
|  | 3338 | */ | 
|  | 3339 | static int | 
| Chris Wilson | 9af90d1 | 2010-10-17 10:01:56 +0100 | [diff] [blame] | 3340 | i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj, | 
|  | 3341 | struct drm_file *file_priv, | 
|  | 3342 | struct drm_i915_gem_exec_object2 *entry) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3343 | { | 
| Chris Wilson | 9af90d1 | 2010-10-17 10:01:56 +0100 | [diff] [blame] | 3344 | struct drm_device *dev = obj->base.dev; | 
| Keith Packard | 0839ccb | 2008-10-30 19:38:48 -0700 | [diff] [blame] | 3345 | drm_i915_private_t *dev_priv = dev->dev_private; | 
| Chris Wilson | 2549d6c | 2010-10-14 12:10:41 +0100 | [diff] [blame] | 3346 | struct drm_i915_gem_relocation_entry __user *user_relocs; | 
| Chris Wilson | 9af90d1 | 2010-10-17 10:01:56 +0100 | [diff] [blame] | 3347 | struct drm_gem_object *target_obj = NULL; | 
|  | 3348 | uint32_t target_handle = 0; | 
|  | 3349 | int i, ret = 0; | 
| Jesse Barnes | 76446ca | 2009-12-17 22:05:42 -0500 | [diff] [blame] | 3350 |  | 
| Chris Wilson | 2549d6c | 2010-10-14 12:10:41 +0100 | [diff] [blame] | 3351 | user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3352 | for (i = 0; i < entry->relocation_count; i++) { | 
| Chris Wilson | 2549d6c | 2010-10-14 12:10:41 +0100 | [diff] [blame] | 3353 | struct drm_i915_gem_relocation_entry reloc; | 
| Chris Wilson | 9af90d1 | 2010-10-17 10:01:56 +0100 | [diff] [blame] | 3354 | uint32_t target_offset; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3355 |  | 
| Chris Wilson | 9af90d1 | 2010-10-17 10:01:56 +0100 | [diff] [blame] | 3356 | if (__copy_from_user_inatomic(&reloc, | 
|  | 3357 | user_relocs+i, | 
|  | 3358 | sizeof(reloc))) { | 
|  | 3359 | ret = -EFAULT; | 
|  | 3360 | break; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3361 | } | 
| Chris Wilson | 2549d6c | 2010-10-14 12:10:41 +0100 | [diff] [blame] | 3362 |  | 
| Chris Wilson | 9af90d1 | 2010-10-17 10:01:56 +0100 | [diff] [blame] | 3363 | if (reloc.target_handle != target_handle) { | 
|  | 3364 | drm_gem_object_unreference(target_obj); | 
|  | 3365 |  | 
|  | 3366 | target_obj = drm_gem_object_lookup(dev, file_priv, | 
|  | 3367 | reloc.target_handle); | 
|  | 3368 | if (target_obj == NULL) { | 
|  | 3369 | ret = -ENOENT; | 
|  | 3370 | break; | 
|  | 3371 | } | 
|  | 3372 |  | 
|  | 3373 | target_handle = reloc.target_handle; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3374 | } | 
| Chris Wilson | 9af90d1 | 2010-10-17 10:01:56 +0100 | [diff] [blame] | 3375 | target_offset = to_intel_bo(target_obj)->gtt_offset; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3376 |  | 
| Chris Wilson | 8542a0b | 2009-09-09 21:15:15 +0100 | [diff] [blame] | 3377 | #if WATCH_RELOC | 
|  | 3378 | DRM_INFO("%s: obj %p offset %08x target %d " | 
|  | 3379 | "read %08x write %08x gtt %08x " | 
|  | 3380 | "presumed %08x delta %08x\n", | 
|  | 3381 | __func__, | 
|  | 3382 | obj, | 
| Chris Wilson | 2549d6c | 2010-10-14 12:10:41 +0100 | [diff] [blame] | 3383 | (int) reloc.offset, | 
|  | 3384 | (int) reloc.target_handle, | 
|  | 3385 | (int) reloc.read_domains, | 
|  | 3386 | (int) reloc.write_domain, | 
| Chris Wilson | 9af90d1 | 2010-10-17 10:01:56 +0100 | [diff] [blame] | 3387 | (int) target_offset, | 
| Chris Wilson | 2549d6c | 2010-10-14 12:10:41 +0100 | [diff] [blame] | 3388 | (int) reloc.presumed_offset, | 
|  | 3389 | reloc.delta); | 
| Chris Wilson | 8542a0b | 2009-09-09 21:15:15 +0100 | [diff] [blame] | 3390 | #endif | 
|  | 3391 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3392 | /* The target buffer should have appeared before us in the | 
|  | 3393 | * exec_object list, so it should have a GTT space bound by now. | 
|  | 3394 | */ | 
| Chris Wilson | 9af90d1 | 2010-10-17 10:01:56 +0100 | [diff] [blame] | 3395 | if (target_offset == 0) { | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3396 | DRM_ERROR("No GTT space found for object %d\n", | 
| Chris Wilson | 2549d6c | 2010-10-14 12:10:41 +0100 | [diff] [blame] | 3397 | reloc.target_handle); | 
| Chris Wilson | 9af90d1 | 2010-10-17 10:01:56 +0100 | [diff] [blame] | 3398 | ret = -EINVAL; | 
|  | 3399 | break; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3400 | } | 
|  | 3401 |  | 
| Chris Wilson | 8542a0b | 2009-09-09 21:15:15 +0100 | [diff] [blame] | 3402 | /* Validate that the target is in a valid r/w GPU domain */ | 
| Chris Wilson | 2549d6c | 2010-10-14 12:10:41 +0100 | [diff] [blame] | 3403 | if (reloc.write_domain & (reloc.write_domain - 1)) { | 
| Daniel Vetter | 16edd55 | 2010-02-19 11:52:02 +0100 | [diff] [blame] | 3404 | DRM_ERROR("reloc with multiple write domains: " | 
|  | 3405 | "obj %p target %d offset %d " | 
|  | 3406 | "read %08x write %08x", | 
| Chris Wilson | 2549d6c | 2010-10-14 12:10:41 +0100 | [diff] [blame] | 3407 | obj, reloc.target_handle, | 
|  | 3408 | (int) reloc.offset, | 
|  | 3409 | reloc.read_domains, | 
|  | 3410 | reloc.write_domain); | 
| Chris Wilson | 9af90d1 | 2010-10-17 10:01:56 +0100 | [diff] [blame] | 3411 | ret = -EINVAL; | 
|  | 3412 | break; | 
| Daniel Vetter | 16edd55 | 2010-02-19 11:52:02 +0100 | [diff] [blame] | 3413 | } | 
| Chris Wilson | 2549d6c | 2010-10-14 12:10:41 +0100 | [diff] [blame] | 3414 | if (reloc.write_domain & I915_GEM_DOMAIN_CPU || | 
|  | 3415 | reloc.read_domains & I915_GEM_DOMAIN_CPU) { | 
| Chris Wilson | 8542a0b | 2009-09-09 21:15:15 +0100 | [diff] [blame] | 3416 | DRM_ERROR("reloc with read/write CPU domains: " | 
|  | 3417 | "obj %p target %d offset %d " | 
|  | 3418 | "read %08x write %08x", | 
| Chris Wilson | 2549d6c | 2010-10-14 12:10:41 +0100 | [diff] [blame] | 3419 | obj, reloc.target_handle, | 
|  | 3420 | (int) reloc.offset, | 
|  | 3421 | reloc.read_domains, | 
|  | 3422 | reloc.write_domain); | 
| Chris Wilson | 9af90d1 | 2010-10-17 10:01:56 +0100 | [diff] [blame] | 3423 | ret = -EINVAL; | 
|  | 3424 | break; | 
| Chris Wilson | 8542a0b | 2009-09-09 21:15:15 +0100 | [diff] [blame] | 3425 | } | 
| Chris Wilson | 2549d6c | 2010-10-14 12:10:41 +0100 | [diff] [blame] | 3426 | if (reloc.write_domain && target_obj->pending_write_domain && | 
|  | 3427 | reloc.write_domain != target_obj->pending_write_domain) { | 
| Chris Wilson | 8542a0b | 2009-09-09 21:15:15 +0100 | [diff] [blame] | 3428 | DRM_ERROR("Write domain conflict: " | 
|  | 3429 | "obj %p target %d offset %d " | 
|  | 3430 | "new %08x old %08x\n", | 
| Chris Wilson | 2549d6c | 2010-10-14 12:10:41 +0100 | [diff] [blame] | 3431 | obj, reloc.target_handle, | 
|  | 3432 | (int) reloc.offset, | 
|  | 3433 | reloc.write_domain, | 
| Chris Wilson | 8542a0b | 2009-09-09 21:15:15 +0100 | [diff] [blame] | 3434 | target_obj->pending_write_domain); | 
| Chris Wilson | 9af90d1 | 2010-10-17 10:01:56 +0100 | [diff] [blame] | 3435 | ret = -EINVAL; | 
|  | 3436 | break; | 
| Chris Wilson | 8542a0b | 2009-09-09 21:15:15 +0100 | [diff] [blame] | 3437 | } | 
|  | 3438 |  | 
| Chris Wilson | 2549d6c | 2010-10-14 12:10:41 +0100 | [diff] [blame] | 3439 | target_obj->pending_read_domains |= reloc.read_domains; | 
| Chris Wilson | 878a3c3 | 2010-10-22 10:48:12 +0100 | [diff] [blame] | 3440 | target_obj->pending_write_domain |= reloc.write_domain; | 
| Chris Wilson | 8542a0b | 2009-09-09 21:15:15 +0100 | [diff] [blame] | 3441 |  | 
|  | 3442 | /* If the relocation already has the right value in it, no | 
|  | 3443 | * more work needs to be done. | 
|  | 3444 | */ | 
| Chris Wilson | 9af90d1 | 2010-10-17 10:01:56 +0100 | [diff] [blame] | 3445 | if (target_offset == reloc.presumed_offset) | 
| Chris Wilson | 8542a0b | 2009-09-09 21:15:15 +0100 | [diff] [blame] | 3446 | continue; | 
| Chris Wilson | 8542a0b | 2009-09-09 21:15:15 +0100 | [diff] [blame] | 3447 |  | 
|  | 3448 | /* Check that the relocation address is valid... */ | 
| Chris Wilson | 9af90d1 | 2010-10-17 10:01:56 +0100 | [diff] [blame] | 3449 | if (reloc.offset > obj->base.size - 4) { | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3450 | DRM_ERROR("Relocation beyond object bounds: " | 
|  | 3451 | "obj %p target %d offset %d size %d.\n", | 
| Chris Wilson | 2549d6c | 2010-10-14 12:10:41 +0100 | [diff] [blame] | 3452 | obj, reloc.target_handle, | 
| Chris Wilson | 9af90d1 | 2010-10-17 10:01:56 +0100 | [diff] [blame] | 3453 | (int) reloc.offset, (int) obj->base.size); | 
|  | 3454 | ret = -EINVAL; | 
|  | 3455 | break; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3456 | } | 
| Chris Wilson | 2549d6c | 2010-10-14 12:10:41 +0100 | [diff] [blame] | 3457 | if (reloc.offset & 3) { | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3458 | DRM_ERROR("Relocation not 4-byte aligned: " | 
|  | 3459 | "obj %p target %d offset %d.\n", | 
| Chris Wilson | 2549d6c | 2010-10-14 12:10:41 +0100 | [diff] [blame] | 3460 | obj, reloc.target_handle, | 
|  | 3461 | (int) reloc.offset); | 
| Chris Wilson | 9af90d1 | 2010-10-17 10:01:56 +0100 | [diff] [blame] | 3462 | ret = -EINVAL; | 
|  | 3463 | break; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3464 | } | 
|  | 3465 |  | 
| Chris Wilson | 8542a0b | 2009-09-09 21:15:15 +0100 | [diff] [blame] | 3466 | /* and points to somewhere within the target object. */ | 
| Chris Wilson | 2549d6c | 2010-10-14 12:10:41 +0100 | [diff] [blame] | 3467 | if (reloc.delta >= target_obj->size) { | 
| Chris Wilson | cd0b9fb | 2009-09-15 23:23:18 +0100 | [diff] [blame] | 3468 | DRM_ERROR("Relocation beyond target object bounds: " | 
|  | 3469 | "obj %p target %d delta %d size %d.\n", | 
| Chris Wilson | 2549d6c | 2010-10-14 12:10:41 +0100 | [diff] [blame] | 3470 | obj, reloc.target_handle, | 
|  | 3471 | (int) reloc.delta, (int) target_obj->size); | 
| Chris Wilson | 9af90d1 | 2010-10-17 10:01:56 +0100 | [diff] [blame] | 3472 | ret = -EINVAL; | 
|  | 3473 | break; | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3474 | } | 
|  | 3475 |  | 
| Chris Wilson | 9af90d1 | 2010-10-17 10:01:56 +0100 | [diff] [blame] | 3476 | reloc.delta += target_offset; | 
|  | 3477 | if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) { | 
| Chris Wilson | f0c43d9 | 2010-10-14 12:44:48 +0100 | [diff] [blame] | 3478 | uint32_t page_offset = reloc.offset & ~PAGE_MASK; | 
|  | 3479 | char *vaddr; | 
|  | 3480 |  | 
| Linus Torvalds | c48c43e | 2010-10-26 18:57:59 -0700 | [diff] [blame] | 3481 | vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT]); | 
| Chris Wilson | f0c43d9 | 2010-10-14 12:44:48 +0100 | [diff] [blame] | 3482 | *(uint32_t *)(vaddr + page_offset) = reloc.delta; | 
| Linus Torvalds | c48c43e | 2010-10-26 18:57:59 -0700 | [diff] [blame] | 3483 | kunmap_atomic(vaddr); | 
| Chris Wilson | f0c43d9 | 2010-10-14 12:44:48 +0100 | [diff] [blame] | 3484 | } else { | 
|  | 3485 | uint32_t __iomem *reloc_entry; | 
|  | 3486 | void __iomem *reloc_page; | 
| Chris Wilson | f0c43d9 | 2010-10-14 12:44:48 +0100 | [diff] [blame] | 3487 |  | 
| Chris Wilson | 9af90d1 | 2010-10-17 10:01:56 +0100 | [diff] [blame] | 3488 | ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1); | 
|  | 3489 | if (ret) | 
|  | 3490 | break; | 
| Chris Wilson | f0c43d9 | 2010-10-14 12:44:48 +0100 | [diff] [blame] | 3491 |  | 
|  | 3492 | /* Map the page containing the relocation we're going to perform.  */ | 
| Chris Wilson | 9af90d1 | 2010-10-17 10:01:56 +0100 | [diff] [blame] | 3493 | reloc.offset += obj->gtt_offset; | 
| Chris Wilson | f0c43d9 | 2010-10-14 12:44:48 +0100 | [diff] [blame] | 3494 | reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, | 
| Linus Torvalds | c48c43e | 2010-10-26 18:57:59 -0700 | [diff] [blame] | 3495 | reloc.offset & PAGE_MASK); | 
| Chris Wilson | f0c43d9 | 2010-10-14 12:44:48 +0100 | [diff] [blame] | 3496 | reloc_entry = (uint32_t __iomem *) | 
|  | 3497 | (reloc_page + (reloc.offset & ~PAGE_MASK)); | 
|  | 3498 | iowrite32(reloc.delta, reloc_entry); | 
| Linus Torvalds | c48c43e | 2010-10-26 18:57:59 -0700 | [diff] [blame] | 3499 | io_mapping_unmap_atomic(reloc_page); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3500 | } | 
|  | 3501 |  | 
| Chris Wilson | b5dc608 | 2010-10-20 20:59:57 +0100 | [diff] [blame] | 3502 | /* and update the user's relocation entry */ | 
|  | 3503 | reloc.presumed_offset = target_offset; | 
|  | 3504 | if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset, | 
|  | 3505 | &reloc.presumed_offset, | 
|  | 3506 | sizeof(reloc.presumed_offset))) { | 
|  | 3507 | ret = -EFAULT; | 
|  | 3508 | break; | 
|  | 3509 | } | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3510 | } | 
|  | 3511 |  | 
| Chris Wilson | 9af90d1 | 2010-10-17 10:01:56 +0100 | [diff] [blame] | 3512 | drm_gem_object_unreference(target_obj); | 
|  | 3513 | return ret; | 
|  | 3514 | } | 
|  | 3515 |  | 
|  | 3516 | static int | 
|  | 3517 | i915_gem_execbuffer_pin(struct drm_device *dev, | 
|  | 3518 | struct drm_file *file, | 
|  | 3519 | struct drm_gem_object **object_list, | 
|  | 3520 | struct drm_i915_gem_exec_object2 *exec_list, | 
|  | 3521 | int count) | 
|  | 3522 | { | 
|  | 3523 | struct drm_i915_private *dev_priv = dev->dev_private; | 
|  | 3524 | int ret, i, retry; | 
|  | 3525 |  | 
|  | 3526 | /* attempt to pin all of the buffers into the GTT */ | 
| Chris Wilson | 5eac3ab | 2010-10-31 08:49:47 +0000 | [diff] [blame] | 3527 | retry = 0; | 
|  | 3528 | do { | 
| Chris Wilson | 9af90d1 | 2010-10-17 10:01:56 +0100 | [diff] [blame] | 3529 | ret = 0; | 
|  | 3530 | for (i = 0; i < count; i++) { | 
|  | 3531 | struct drm_i915_gem_exec_object2 *entry = &exec_list[i]; | 
| Daniel Vetter | 16e809a | 2010-09-16 19:37:04 +0200 | [diff] [blame] | 3532 | struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]); | 
| Chris Wilson | 9af90d1 | 2010-10-17 10:01:56 +0100 | [diff] [blame] | 3533 | bool need_fence = | 
|  | 3534 | entry->flags & EXEC_OBJECT_NEEDS_FENCE && | 
|  | 3535 | obj->tiling_mode != I915_TILING_NONE; | 
|  | 3536 |  | 
| Daniel Vetter | 16e809a | 2010-09-16 19:37:04 +0200 | [diff] [blame] | 3537 | /* g33/pnv can't fence buffers in the unmappable part */ | 
|  | 3538 | bool need_mappable = | 
|  | 3539 | entry->relocation_count ? true : need_fence; | 
|  | 3540 |  | 
| Chris Wilson | 9af90d1 | 2010-10-17 10:01:56 +0100 | [diff] [blame] | 3541 | /* Check fence reg constraints and rebind if necessary */ | 
| Daniel Vetter | 75e9e91 | 2010-11-04 17:11:09 +0100 | [diff] [blame^] | 3542 | if (need_mappable && !obj->map_and_fenceable) { | 
| Chris Wilson | 9af90d1 | 2010-10-17 10:01:56 +0100 | [diff] [blame] | 3543 | ret = i915_gem_object_unbind(&obj->base); | 
|  | 3544 | if (ret) | 
|  | 3545 | break; | 
|  | 3546 | } | 
|  | 3547 |  | 
| Daniel Vetter | 920afa7 | 2010-09-16 17:54:23 +0200 | [diff] [blame] | 3548 | ret = i915_gem_object_pin(&obj->base, | 
| Daniel Vetter | 16e809a | 2010-09-16 19:37:04 +0200 | [diff] [blame] | 3549 | entry->alignment, | 
| Daniel Vetter | 75e9e91 | 2010-11-04 17:11:09 +0100 | [diff] [blame^] | 3550 | need_mappable); | 
| Chris Wilson | 9af90d1 | 2010-10-17 10:01:56 +0100 | [diff] [blame] | 3551 | if (ret) | 
|  | 3552 | break; | 
|  | 3553 |  | 
|  | 3554 | /* | 
|  | 3555 | * Pre-965 chips need a fence register set up in order | 
|  | 3556 | * to properly handle blits to/from tiled surfaces. | 
|  | 3557 | */ | 
|  | 3558 | if (need_fence) { | 
|  | 3559 | ret = i915_gem_object_get_fence_reg(&obj->base, true); | 
|  | 3560 | if (ret) { | 
|  | 3561 | i915_gem_object_unpin(&obj->base); | 
|  | 3562 | break; | 
|  | 3563 | } | 
|  | 3564 |  | 
|  | 3565 | dev_priv->fence_regs[obj->fence_reg].gpu = true; | 
|  | 3566 | } | 
|  | 3567 |  | 
|  | 3568 | entry->offset = obj->gtt_offset; | 
|  | 3569 | } | 
|  | 3570 |  | 
|  | 3571 | while (i--) | 
|  | 3572 | i915_gem_object_unpin(object_list[i]); | 
|  | 3573 |  | 
| Chris Wilson | 5eac3ab | 2010-10-31 08:49:47 +0000 | [diff] [blame] | 3574 | if (ret != -ENOSPC || retry > 1) | 
| Chris Wilson | 9af90d1 | 2010-10-17 10:01:56 +0100 | [diff] [blame] | 3575 | return ret; | 
|  | 3576 |  | 
| Chris Wilson | 5eac3ab | 2010-10-31 08:49:47 +0000 | [diff] [blame] | 3577 | /* First attempt, just clear anything that is purgeable. | 
|  | 3578 | * Second attempt, clear the entire GTT. | 
|  | 3579 | */ | 
|  | 3580 | ret = i915_gem_evict_everything(dev, retry == 0); | 
| Chris Wilson | 9af90d1 | 2010-10-17 10:01:56 +0100 | [diff] [blame] | 3581 | if (ret) | 
|  | 3582 | return ret; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3583 |  | 
| Chris Wilson | 5eac3ab | 2010-10-31 08:49:47 +0000 | [diff] [blame] | 3584 | retry++; | 
|  | 3585 | } while (1); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3586 | } | 
|  | 3587 |  | 
| Chris Wilson | 13b2928 | 2010-11-01 12:22:48 +0000 | [diff] [blame] | 3588 | static int | 
|  | 3589 | i915_gem_execbuffer_move_to_gpu(struct drm_device *dev, | 
|  | 3590 | struct drm_file *file, | 
|  | 3591 | struct intel_ring_buffer *ring, | 
|  | 3592 | struct drm_gem_object **objects, | 
|  | 3593 | int count) | 
|  | 3594 | { | 
| Chris Wilson | 0f8c6d7 | 2010-11-01 12:38:44 +0000 | [diff] [blame] | 3595 | struct change_domains cd; | 
| Chris Wilson | 13b2928 | 2010-11-01 12:22:48 +0000 | [diff] [blame] | 3596 | int ret, i; | 
|  | 3597 |  | 
| Chris Wilson | 0f8c6d7 | 2010-11-01 12:38:44 +0000 | [diff] [blame] | 3598 | cd.invalidate_domains = 0; | 
|  | 3599 | cd.flush_domains = 0; | 
|  | 3600 | cd.flush_rings = 0; | 
| Chris Wilson | 13b2928 | 2010-11-01 12:22:48 +0000 | [diff] [blame] | 3601 | for (i = 0; i < count; i++) | 
| Chris Wilson | 0f8c6d7 | 2010-11-01 12:38:44 +0000 | [diff] [blame] | 3602 | i915_gem_object_set_to_gpu_domain(objects[i], ring, &cd); | 
| Chris Wilson | 13b2928 | 2010-11-01 12:22:48 +0000 | [diff] [blame] | 3603 |  | 
| Chris Wilson | 0f8c6d7 | 2010-11-01 12:38:44 +0000 | [diff] [blame] | 3604 | if (cd.invalidate_domains | cd.flush_domains) { | 
| Chris Wilson | 13b2928 | 2010-11-01 12:22:48 +0000 | [diff] [blame] | 3605 | #if WATCH_EXEC | 
|  | 3606 | DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n", | 
|  | 3607 | __func__, | 
| Chris Wilson | 0f8c6d7 | 2010-11-01 12:38:44 +0000 | [diff] [blame] | 3608 | cd.invalidate_domains, | 
|  | 3609 | cd.flush_domains); | 
| Chris Wilson | 13b2928 | 2010-11-01 12:22:48 +0000 | [diff] [blame] | 3610 | #endif | 
|  | 3611 | i915_gem_flush(dev, file, | 
| Chris Wilson | 0f8c6d7 | 2010-11-01 12:38:44 +0000 | [diff] [blame] | 3612 | cd.invalidate_domains, | 
|  | 3613 | cd.flush_domains, | 
|  | 3614 | cd.flush_rings); | 
| Chris Wilson | 13b2928 | 2010-11-01 12:22:48 +0000 | [diff] [blame] | 3615 | } | 
|  | 3616 |  | 
|  | 3617 | for (i = 0; i < count; i++) { | 
|  | 3618 | struct drm_i915_gem_object *obj = to_intel_bo(objects[i]); | 
|  | 3619 | /* XXX replace with semaphores */ | 
|  | 3620 | if (obj->ring && ring != obj->ring) { | 
|  | 3621 | ret = i915_gem_object_wait_rendering(&obj->base, true); | 
|  | 3622 | if (ret) | 
|  | 3623 | return ret; | 
|  | 3624 | } | 
|  | 3625 | } | 
|  | 3626 |  | 
|  | 3627 | return 0; | 
|  | 3628 | } | 
|  | 3629 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3630 | /* Throttle our rendering by waiting until the ring has completed our requests | 
|  | 3631 | * emitted over 20 msec ago. | 
|  | 3632 | * | 
| Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 3633 | * Note that if we were to use the current jiffies each time around the loop, | 
|  | 3634 | * we wouldn't escape the function with any frames outstanding if the time to | 
|  | 3635 | * render a frame was over 20ms. | 
|  | 3636 | * | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3637 | * This should get us reasonable parallelism between CPU and GPU but also | 
|  | 3638 | * relatively low latency when blocking on a particular request to finish. | 
|  | 3639 | */ | 
|  | 3640 | static int | 
| Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 3641 | i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3642 | { | 
| Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 3643 | struct drm_i915_private *dev_priv = dev->dev_private; | 
|  | 3644 | struct drm_i915_file_private *file_priv = file->driver_priv; | 
| Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 3645 | unsigned long recent_enough = jiffies - msecs_to_jiffies(20); | 
| Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 3646 | struct drm_i915_gem_request *request; | 
|  | 3647 | struct intel_ring_buffer *ring = NULL; | 
|  | 3648 | u32 seqno = 0; | 
|  | 3649 | int ret; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3650 |  | 
| Chris Wilson | 1c25595 | 2010-09-26 11:03:27 +0100 | [diff] [blame] | 3651 | spin_lock(&file_priv->mm.lock); | 
| Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 3652 | list_for_each_entry(request, &file_priv->mm.request_list, client_list) { | 
| Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 3653 | if (time_after_eq(request->emitted_jiffies, recent_enough)) | 
|  | 3654 | break; | 
|  | 3655 |  | 
| Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 3656 | ring = request->ring; | 
|  | 3657 | seqno = request->seqno; | 
| Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 3658 | } | 
| Chris Wilson | 1c25595 | 2010-09-26 11:03:27 +0100 | [diff] [blame] | 3659 | spin_unlock(&file_priv->mm.lock); | 
| Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 3660 |  | 
|  | 3661 | if (seqno == 0) | 
|  | 3662 | return 0; | 
|  | 3663 |  | 
|  | 3664 | ret = 0; | 
| Chris Wilson | 78501ea | 2010-10-27 12:18:21 +0100 | [diff] [blame] | 3665 | if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) { | 
| Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 3666 | /* And wait for the seqno passing without holding any locks and | 
|  | 3667 | * causing extra latency for others. This is safe as the irq | 
|  | 3668 | * generation is designed to be run atomically and so is | 
|  | 3669 | * lockless. | 
|  | 3670 | */ | 
| Chris Wilson | 78501ea | 2010-10-27 12:18:21 +0100 | [diff] [blame] | 3671 | ring->user_irq_get(ring); | 
| Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 3672 | ret = wait_event_interruptible(ring->irq_queue, | 
| Chris Wilson | 78501ea | 2010-10-27 12:18:21 +0100 | [diff] [blame] | 3673 | i915_seqno_passed(ring->get_seqno(ring), seqno) | 
| Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 3674 | || atomic_read(&dev_priv->mm.wedged)); | 
| Chris Wilson | 78501ea | 2010-10-27 12:18:21 +0100 | [diff] [blame] | 3675 | ring->user_irq_put(ring); | 
| Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 3676 |  | 
|  | 3677 | if (ret == 0 && atomic_read(&dev_priv->mm.wedged)) | 
|  | 3678 | ret = -EIO; | 
|  | 3679 | } | 
|  | 3680 |  | 
|  | 3681 | if (ret == 0) | 
|  | 3682 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); | 
| Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 3683 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3684 | return ret; | 
|  | 3685 | } | 
|  | 3686 |  | 
| Eric Anholt | 40a5f0d | 2009-03-12 11:23:52 -0700 | [diff] [blame] | 3687 | static int | 
| Chris Wilson | 2549d6c | 2010-10-14 12:10:41 +0100 | [diff] [blame] | 3688 | i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec, | 
|  | 3689 | uint64_t exec_offset) | 
| Chris Wilson | 83d6079 | 2009-06-06 09:45:57 +0100 | [diff] [blame] | 3690 | { | 
|  | 3691 | uint32_t exec_start, exec_len; | 
|  | 3692 |  | 
|  | 3693 | exec_start = (uint32_t) exec_offset + exec->batch_start_offset; | 
|  | 3694 | exec_len = (uint32_t) exec->batch_len; | 
|  | 3695 |  | 
|  | 3696 | if ((exec_start | exec_len) & 0x7) | 
|  | 3697 | return -EINVAL; | 
|  | 3698 |  | 
|  | 3699 | if (!exec_start) | 
|  | 3700 | return -EINVAL; | 
|  | 3701 |  | 
|  | 3702 | return 0; | 
|  | 3703 | } | 
|  | 3704 |  | 
| Kristian Høgsberg | 6b95a20 | 2009-11-18 11:25:18 -0500 | [diff] [blame] | 3705 | static int | 
| Chris Wilson | 2549d6c | 2010-10-14 12:10:41 +0100 | [diff] [blame] | 3706 | validate_exec_list(struct drm_i915_gem_exec_object2 *exec, | 
|  | 3707 | int count) | 
| Kristian Høgsberg | 6b95a20 | 2009-11-18 11:25:18 -0500 | [diff] [blame] | 3708 | { | 
| Chris Wilson | 2549d6c | 2010-10-14 12:10:41 +0100 | [diff] [blame] | 3709 | int i; | 
| Kristian Høgsberg | 6b95a20 | 2009-11-18 11:25:18 -0500 | [diff] [blame] | 3710 |  | 
| Chris Wilson | 2549d6c | 2010-10-14 12:10:41 +0100 | [diff] [blame] | 3711 | for (i = 0; i < count; i++) { | 
|  | 3712 | char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr; | 
|  | 3713 | size_t length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry); | 
| Kristian Høgsberg | 6b95a20 | 2009-11-18 11:25:18 -0500 | [diff] [blame] | 3714 |  | 
| Chris Wilson | 2549d6c | 2010-10-14 12:10:41 +0100 | [diff] [blame] | 3715 | if (!access_ok(VERIFY_READ, ptr, length)) | 
|  | 3716 | return -EFAULT; | 
|  | 3717 |  | 
| Chris Wilson | b5dc608 | 2010-10-20 20:59:57 +0100 | [diff] [blame] | 3718 | /* we may also need to update the presumed offsets */ | 
|  | 3719 | if (!access_ok(VERIFY_WRITE, ptr, length)) | 
|  | 3720 | return -EFAULT; | 
|  | 3721 |  | 
| Chris Wilson | 2549d6c | 2010-10-14 12:10:41 +0100 | [diff] [blame] | 3722 | if (fault_in_pages_readable(ptr, length)) | 
|  | 3723 | return -EFAULT; | 
| Kristian Høgsberg | 6b95a20 | 2009-11-18 11:25:18 -0500 | [diff] [blame] | 3724 | } | 
| Kristian Høgsberg | 6b95a20 | 2009-11-18 11:25:18 -0500 | [diff] [blame] | 3725 |  | 
| Chris Wilson | 2549d6c | 2010-10-14 12:10:41 +0100 | [diff] [blame] | 3726 | return 0; | 
| Kristian Høgsberg | 6b95a20 | 2009-11-18 11:25:18 -0500 | [diff] [blame] | 3727 | } | 
|  | 3728 |  | 
| Chris Wilson | 2549d6c | 2010-10-14 12:10:41 +0100 | [diff] [blame] | 3729 | static int | 
| Jesse Barnes | 76446ca | 2009-12-17 22:05:42 -0500 | [diff] [blame] | 3730 | i915_gem_do_execbuffer(struct drm_device *dev, void *data, | 
| Chris Wilson | 9af90d1 | 2010-10-17 10:01:56 +0100 | [diff] [blame] | 3731 | struct drm_file *file, | 
| Jesse Barnes | 76446ca | 2009-12-17 22:05:42 -0500 | [diff] [blame] | 3732 | struct drm_i915_gem_execbuffer2 *args, | 
|  | 3733 | struct drm_i915_gem_exec_object2 *exec_list) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3734 | { | 
|  | 3735 | drm_i915_private_t *dev_priv = dev->dev_private; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3736 | struct drm_gem_object **object_list = NULL; | 
|  | 3737 | struct drm_gem_object *batch_obj; | 
| Eric Anholt | 201361a | 2009-03-11 12:30:04 -0700 | [diff] [blame] | 3738 | struct drm_clip_rect *cliprects = NULL; | 
| Chris Wilson | 8dc5d14 | 2010-08-12 12:36:12 +0100 | [diff] [blame] | 3739 | struct drm_i915_gem_request *request = NULL; | 
| Chris Wilson | 9af90d1 | 2010-10-17 10:01:56 +0100 | [diff] [blame] | 3740 | int ret, i, flips; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3741 | uint64_t exec_offset; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3742 |  | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 3743 | struct intel_ring_buffer *ring = NULL; | 
|  | 3744 |  | 
| Chris Wilson | 30dbf0c | 2010-09-25 10:19:17 +0100 | [diff] [blame] | 3745 | ret = i915_gem_check_is_wedged(dev); | 
|  | 3746 | if (ret) | 
|  | 3747 | return ret; | 
|  | 3748 |  | 
| Chris Wilson | 2549d6c | 2010-10-14 12:10:41 +0100 | [diff] [blame] | 3749 | ret = validate_exec_list(exec_list, args->buffer_count); | 
|  | 3750 | if (ret) | 
|  | 3751 | return ret; | 
|  | 3752 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3753 | #if WATCH_EXEC | 
|  | 3754 | DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", | 
|  | 3755 | (int) args->buffers_ptr, args->buffer_count, args->batch_len); | 
|  | 3756 | #endif | 
| Chris Wilson | 549f736 | 2010-10-19 11:19:32 +0100 | [diff] [blame] | 3757 | switch (args->flags & I915_EXEC_RING_MASK) { | 
|  | 3758 | case I915_EXEC_DEFAULT: | 
|  | 3759 | case I915_EXEC_RENDER: | 
|  | 3760 | ring = &dev_priv->render_ring; | 
|  | 3761 | break; | 
|  | 3762 | case I915_EXEC_BSD: | 
| Zou Nan hai | d1b851f | 2010-05-21 09:08:57 +0800 | [diff] [blame] | 3763 | if (!HAS_BSD(dev)) { | 
| Chris Wilson | 549f736 | 2010-10-19 11:19:32 +0100 | [diff] [blame] | 3764 | DRM_ERROR("execbuf with invalid ring (BSD)\n"); | 
| Zou Nan hai | d1b851f | 2010-05-21 09:08:57 +0800 | [diff] [blame] | 3765 | return -EINVAL; | 
|  | 3766 | } | 
|  | 3767 | ring = &dev_priv->bsd_ring; | 
| Chris Wilson | 549f736 | 2010-10-19 11:19:32 +0100 | [diff] [blame] | 3768 | break; | 
|  | 3769 | case I915_EXEC_BLT: | 
|  | 3770 | if (!HAS_BLT(dev)) { | 
|  | 3771 | DRM_ERROR("execbuf with invalid ring (BLT)\n"); | 
|  | 3772 | return -EINVAL; | 
|  | 3773 | } | 
|  | 3774 | ring = &dev_priv->blt_ring; | 
|  | 3775 | break; | 
|  | 3776 | default: | 
|  | 3777 | DRM_ERROR("execbuf with unknown ring: %d\n", | 
|  | 3778 | (int)(args->flags & I915_EXEC_RING_MASK)); | 
|  | 3779 | return -EINVAL; | 
| Zou Nan hai | d1b851f | 2010-05-21 09:08:57 +0800 | [diff] [blame] | 3780 | } | 
|  | 3781 |  | 
| Eric Anholt | 4f481ed | 2008-09-10 14:22:49 -0700 | [diff] [blame] | 3782 | if (args->buffer_count < 1) { | 
|  | 3783 | DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); | 
|  | 3784 | return -EINVAL; | 
|  | 3785 | } | 
| Eric Anholt | c8e0f93 | 2009-11-22 03:49:37 +0100 | [diff] [blame] | 3786 | object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count); | 
| Jesse Barnes | 76446ca | 2009-12-17 22:05:42 -0500 | [diff] [blame] | 3787 | if (object_list == NULL) { | 
|  | 3788 | DRM_ERROR("Failed to allocate object list for %d buffers\n", | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3789 | args->buffer_count); | 
|  | 3790 | ret = -ENOMEM; | 
|  | 3791 | goto pre_mutex_err; | 
|  | 3792 | } | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3793 |  | 
| Eric Anholt | 201361a | 2009-03-11 12:30:04 -0700 | [diff] [blame] | 3794 | if (args->num_cliprects != 0) { | 
| Eric Anholt | 9a298b2 | 2009-03-24 12:23:04 -0700 | [diff] [blame] | 3795 | cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects), | 
|  | 3796 | GFP_KERNEL); | 
| Owain Ainsworth | a40e8d3 | 2010-02-09 14:25:55 +0000 | [diff] [blame] | 3797 | if (cliprects == NULL) { | 
|  | 3798 | ret = -ENOMEM; | 
| Eric Anholt | 201361a | 2009-03-11 12:30:04 -0700 | [diff] [blame] | 3799 | goto pre_mutex_err; | 
| Owain Ainsworth | a40e8d3 | 2010-02-09 14:25:55 +0000 | [diff] [blame] | 3800 | } | 
| Eric Anholt | 201361a | 2009-03-11 12:30:04 -0700 | [diff] [blame] | 3801 |  | 
|  | 3802 | ret = copy_from_user(cliprects, | 
|  | 3803 | (struct drm_clip_rect __user *) | 
|  | 3804 | (uintptr_t) args->cliprects_ptr, | 
|  | 3805 | sizeof(*cliprects) * args->num_cliprects); | 
|  | 3806 | if (ret != 0) { | 
|  | 3807 | DRM_ERROR("copy %d cliprects failed: %d\n", | 
|  | 3808 | args->num_cliprects, ret); | 
| Dan Carpenter | c877cdc | 2010-06-23 19:03:01 +0200 | [diff] [blame] | 3809 | ret = -EFAULT; | 
| Eric Anholt | 201361a | 2009-03-11 12:30:04 -0700 | [diff] [blame] | 3810 | goto pre_mutex_err; | 
|  | 3811 | } | 
|  | 3812 | } | 
|  | 3813 |  | 
| Chris Wilson | 8dc5d14 | 2010-08-12 12:36:12 +0100 | [diff] [blame] | 3814 | request = kzalloc(sizeof(*request), GFP_KERNEL); | 
|  | 3815 | if (request == NULL) { | 
|  | 3816 | ret = -ENOMEM; | 
| Chris Wilson | a198bc8 | 2009-02-06 16:55:20 +0000 | [diff] [blame] | 3817 | goto pre_mutex_err; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3818 | } | 
|  | 3819 |  | 
| Chris Wilson | 76c1dec | 2010-09-25 11:22:51 +0100 | [diff] [blame] | 3820 | ret = i915_mutex_lock_interruptible(dev); | 
|  | 3821 | if (ret) | 
|  | 3822 | goto pre_mutex_err; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3823 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3824 | if (dev_priv->mm.suspended) { | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3825 | mutex_unlock(&dev->struct_mutex); | 
| Chris Wilson | a198bc8 | 2009-02-06 16:55:20 +0000 | [diff] [blame] | 3826 | ret = -EBUSY; | 
|  | 3827 | goto pre_mutex_err; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3828 | } | 
|  | 3829 |  | 
| Keith Packard | ac94a96 | 2008-11-20 23:30:27 -0800 | [diff] [blame] | 3830 | /* Look up object handles */ | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3831 | for (i = 0; i < args->buffer_count; i++) { | 
| Chris Wilson | 7e318e1 | 2010-10-27 13:43:39 +0100 | [diff] [blame] | 3832 | struct drm_i915_gem_object *obj_priv; | 
|  | 3833 |  | 
| Chris Wilson | 9af90d1 | 2010-10-17 10:01:56 +0100 | [diff] [blame] | 3834 | object_list[i] = drm_gem_object_lookup(dev, file, | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3835 | exec_list[i].handle); | 
|  | 3836 | if (object_list[i] == NULL) { | 
|  | 3837 | DRM_ERROR("Invalid object handle %d at index %d\n", | 
|  | 3838 | exec_list[i].handle, i); | 
| Chris Wilson | 0ce907f | 2010-01-23 20:26:35 +0000 | [diff] [blame] | 3839 | /* prevent error path from reading uninitialized data */ | 
|  | 3840 | args->buffer_count = i + 1; | 
| Chris Wilson | bf79cb9 | 2010-08-04 14:19:46 +0100 | [diff] [blame] | 3841 | ret = -ENOENT; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3842 | goto err; | 
|  | 3843 | } | 
| Kristian Høgsberg | b70d11d | 2009-03-03 14:45:57 -0500 | [diff] [blame] | 3844 |  | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 3845 | obj_priv = to_intel_bo(object_list[i]); | 
| Kristian Høgsberg | b70d11d | 2009-03-03 14:45:57 -0500 | [diff] [blame] | 3846 | if (obj_priv->in_execbuffer) { | 
|  | 3847 | DRM_ERROR("Object %p appears more than once in object list\n", | 
|  | 3848 | object_list[i]); | 
| Chris Wilson | 0ce907f | 2010-01-23 20:26:35 +0000 | [diff] [blame] | 3849 | /* prevent error path from reading uninitialized data */ | 
|  | 3850 | args->buffer_count = i + 1; | 
| Chris Wilson | bf79cb9 | 2010-08-04 14:19:46 +0100 | [diff] [blame] | 3851 | ret = -EINVAL; | 
| Kristian Høgsberg | b70d11d | 2009-03-03 14:45:57 -0500 | [diff] [blame] | 3852 | goto err; | 
|  | 3853 | } | 
|  | 3854 | obj_priv->in_execbuffer = true; | 
| Kristian Høgsberg | 6b95a20 | 2009-11-18 11:25:18 -0500 | [diff] [blame] | 3855 | } | 
|  | 3856 |  | 
| Chris Wilson | 9af90d1 | 2010-10-17 10:01:56 +0100 | [diff] [blame] | 3857 | /* Move the objects en-masse into the GTT, evicting if necessary. */ | 
|  | 3858 | ret = i915_gem_execbuffer_pin(dev, file, | 
|  | 3859 | object_list, exec_list, | 
|  | 3860 | args->buffer_count); | 
|  | 3861 | if (ret) | 
|  | 3862 | goto err; | 
| Eric Anholt | 40a5f0d | 2009-03-12 11:23:52 -0700 | [diff] [blame] | 3863 |  | 
| Chris Wilson | 9af90d1 | 2010-10-17 10:01:56 +0100 | [diff] [blame] | 3864 | /* The objects are in their final locations, apply the relocations. */ | 
|  | 3865 | for (i = 0; i < args->buffer_count; i++) { | 
|  | 3866 | struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]); | 
|  | 3867 | obj->base.pending_read_domains = 0; | 
|  | 3868 | obj->base.pending_write_domain = 0; | 
|  | 3869 | ret = i915_gem_execbuffer_relocate(obj, file, &exec_list[i]); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3870 | if (ret) | 
|  | 3871 | goto err; | 
|  | 3872 | } | 
|  | 3873 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3874 | /* Set the pending read domains for the batch buffer to COMMAND */ | 
|  | 3875 | batch_obj = object_list[args->buffer_count-1]; | 
| Chris Wilson | 5f26a2c | 2009-06-06 09:45:58 +0100 | [diff] [blame] | 3876 | if (batch_obj->pending_write_domain) { | 
|  | 3877 | DRM_ERROR("Attempting to use self-modifying batch buffer\n"); | 
|  | 3878 | ret = -EINVAL; | 
|  | 3879 | goto err; | 
|  | 3880 | } | 
|  | 3881 | batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3882 |  | 
| Chris Wilson | 9af90d1 | 2010-10-17 10:01:56 +0100 | [diff] [blame] | 3883 | /* Sanity check the batch buffer */ | 
|  | 3884 | exec_offset = to_intel_bo(batch_obj)->gtt_offset; | 
|  | 3885 | ret = i915_gem_check_execbuffer(args, exec_offset); | 
| Chris Wilson | 83d6079 | 2009-06-06 09:45:57 +0100 | [diff] [blame] | 3886 | if (ret != 0) { | 
|  | 3887 | DRM_ERROR("execbuf with invalid offset/length\n"); | 
|  | 3888 | goto err; | 
|  | 3889 | } | 
|  | 3890 |  | 
| Chris Wilson | 13b2928 | 2010-11-01 12:22:48 +0000 | [diff] [blame] | 3891 | ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring, | 
|  | 3892 | object_list, args->buffer_count); | 
|  | 3893 | if (ret) | 
|  | 3894 | goto err; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3895 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3896 | #if WATCH_COHERENCY | 
|  | 3897 | for (i = 0; i < args->buffer_count; i++) { | 
|  | 3898 | i915_gem_object_check_coherency(object_list[i], | 
|  | 3899 | exec_list[i].handle); | 
|  | 3900 | } | 
|  | 3901 | #endif | 
|  | 3902 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3903 | #if WATCH_EXEC | 
| Ben Gamari | 6911a9b | 2009-04-02 11:24:54 -0700 | [diff] [blame] | 3904 | i915_gem_dump_object(batch_obj, | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3905 | args->batch_len, | 
|  | 3906 | __func__, | 
|  | 3907 | ~0); | 
|  | 3908 | #endif | 
|  | 3909 |  | 
| Chris Wilson | e59f2ba | 2010-10-07 17:28:15 +0100 | [diff] [blame] | 3910 | /* Check for any pending flips. As we only maintain a flip queue depth | 
|  | 3911 | * of 1, we can simply insert a WAIT for the next display flip prior | 
|  | 3912 | * to executing the batch and avoid stalling the CPU. | 
|  | 3913 | */ | 
|  | 3914 | flips = 0; | 
|  | 3915 | for (i = 0; i < args->buffer_count; i++) { | 
|  | 3916 | if (object_list[i]->write_domain) | 
|  | 3917 | flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip); | 
|  | 3918 | } | 
|  | 3919 | if (flips) { | 
|  | 3920 | int plane, flip_mask; | 
|  | 3921 |  | 
|  | 3922 | for (plane = 0; flips >> plane; plane++) { | 
|  | 3923 | if (((flips >> plane) & 1) == 0) | 
|  | 3924 | continue; | 
|  | 3925 |  | 
|  | 3926 | if (plane) | 
|  | 3927 | flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; | 
|  | 3928 | else | 
|  | 3929 | flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; | 
|  | 3930 |  | 
| Chris Wilson | e1f99ce | 2010-10-27 12:45:26 +0100 | [diff] [blame] | 3931 | ret = intel_ring_begin(ring, 2); | 
|  | 3932 | if (ret) | 
|  | 3933 | goto err; | 
|  | 3934 |  | 
| Chris Wilson | 78501ea | 2010-10-27 12:18:21 +0100 | [diff] [blame] | 3935 | intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); | 
|  | 3936 | intel_ring_emit(ring, MI_NOOP); | 
|  | 3937 | intel_ring_advance(ring); | 
| Chris Wilson | e59f2ba | 2010-10-07 17:28:15 +0100 | [diff] [blame] | 3938 | } | 
|  | 3939 | } | 
|  | 3940 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3941 | /* Exec the batchbuffer */ | 
| Chris Wilson | 78501ea | 2010-10-27 12:18:21 +0100 | [diff] [blame] | 3942 | ret = ring->dispatch_execbuffer(ring, args, cliprects, exec_offset); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3943 | if (ret) { | 
|  | 3944 | DRM_ERROR("dispatch failed %d\n", ret); | 
|  | 3945 | goto err; | 
|  | 3946 | } | 
|  | 3947 |  | 
| Chris Wilson | 7e318e1 | 2010-10-27 13:43:39 +0100 | [diff] [blame] | 3948 | for (i = 0; i < args->buffer_count; i++) { | 
|  | 3949 | struct drm_gem_object *obj = object_list[i]; | 
|  | 3950 |  | 
|  | 3951 | obj->read_domains = obj->pending_read_domains; | 
|  | 3952 | obj->write_domain = obj->pending_write_domain; | 
|  | 3953 |  | 
|  | 3954 | i915_gem_object_move_to_active(obj, ring); | 
|  | 3955 | if (obj->write_domain) { | 
|  | 3956 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
|  | 3957 | obj_priv->dirty = 1; | 
|  | 3958 | list_move_tail(&obj_priv->gpu_write_list, | 
|  | 3959 | &ring->gpu_write_list); | 
|  | 3960 | intel_mark_busy(dev, obj); | 
|  | 3961 | } | 
|  | 3962 |  | 
|  | 3963 | trace_i915_gem_object_change_domain(obj, | 
|  | 3964 | obj->read_domains, | 
|  | 3965 | obj->write_domain); | 
|  | 3966 | } | 
|  | 3967 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3968 | /* | 
|  | 3969 | * Ensure that the commands in the batch buffer are | 
|  | 3970 | * finished before the interrupt fires | 
|  | 3971 | */ | 
| Daniel Vetter | 8a1a49f | 2010-02-11 22:29:04 +0100 | [diff] [blame] | 3972 | i915_retire_commands(dev, ring); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3973 |  | 
| Chris Wilson | 3cce469 | 2010-10-27 16:11:02 +0100 | [diff] [blame] | 3974 | if (i915_add_request(dev, file, request, ring)) | 
|  | 3975 | ring->outstanding_lazy_request = true; | 
|  | 3976 | else | 
|  | 3977 | request = NULL; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3978 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3979 | err: | 
| Kristian Høgsberg | b70d11d | 2009-03-03 14:45:57 -0500 | [diff] [blame] | 3980 | for (i = 0; i < args->buffer_count; i++) { | 
| Chris Wilson | 7e318e1 | 2010-10-27 13:43:39 +0100 | [diff] [blame] | 3981 | if (object_list[i] == NULL) | 
|  | 3982 | break; | 
|  | 3983 |  | 
|  | 3984 | to_intel_bo(object_list[i])->in_execbuffer = false; | 
| Julia Lawall | aad87df | 2008-12-21 16:28:47 +0100 | [diff] [blame] | 3985 | drm_gem_object_unreference(object_list[i]); | 
| Kristian Høgsberg | b70d11d | 2009-03-03 14:45:57 -0500 | [diff] [blame] | 3986 | } | 
| Julia Lawall | aad87df | 2008-12-21 16:28:47 +0100 | [diff] [blame] | 3987 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3988 | mutex_unlock(&dev->struct_mutex); | 
|  | 3989 |  | 
| Chris Wilson | 93533c2 | 2010-01-31 10:40:48 +0000 | [diff] [blame] | 3990 | pre_mutex_err: | 
| Jesse Barnes | 8e7d2b2 | 2009-05-08 16:13:25 -0700 | [diff] [blame] | 3991 | drm_free_large(object_list); | 
| Eric Anholt | 9a298b2 | 2009-03-24 12:23:04 -0700 | [diff] [blame] | 3992 | kfree(cliprects); | 
| Chris Wilson | 8dc5d14 | 2010-08-12 12:36:12 +0100 | [diff] [blame] | 3993 | kfree(request); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3994 |  | 
|  | 3995 | return ret; | 
|  | 3996 | } | 
|  | 3997 |  | 
| Jesse Barnes | 76446ca | 2009-12-17 22:05:42 -0500 | [diff] [blame] | 3998 | /* | 
|  | 3999 | * Legacy execbuffer just creates an exec2 list from the original exec object | 
|  | 4000 | * list array and passes it to the real function. | 
|  | 4001 | */ | 
|  | 4002 | int | 
|  | 4003 | i915_gem_execbuffer(struct drm_device *dev, void *data, | 
|  | 4004 | struct drm_file *file_priv) | 
|  | 4005 | { | 
|  | 4006 | struct drm_i915_gem_execbuffer *args = data; | 
|  | 4007 | struct drm_i915_gem_execbuffer2 exec2; | 
|  | 4008 | struct drm_i915_gem_exec_object *exec_list = NULL; | 
|  | 4009 | struct drm_i915_gem_exec_object2 *exec2_list = NULL; | 
|  | 4010 | int ret, i; | 
|  | 4011 |  | 
|  | 4012 | #if WATCH_EXEC | 
|  | 4013 | DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", | 
|  | 4014 | (int) args->buffers_ptr, args->buffer_count, args->batch_len); | 
|  | 4015 | #endif | 
|  | 4016 |  | 
|  | 4017 | if (args->buffer_count < 1) { | 
|  | 4018 | DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); | 
|  | 4019 | return -EINVAL; | 
|  | 4020 | } | 
|  | 4021 |  | 
|  | 4022 | /* Copy in the exec list from userland */ | 
|  | 4023 | exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count); | 
|  | 4024 | exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); | 
|  | 4025 | if (exec_list == NULL || exec2_list == NULL) { | 
|  | 4026 | DRM_ERROR("Failed to allocate exec list for %d buffers\n", | 
|  | 4027 | args->buffer_count); | 
|  | 4028 | drm_free_large(exec_list); | 
|  | 4029 | drm_free_large(exec2_list); | 
|  | 4030 | return -ENOMEM; | 
|  | 4031 | } | 
|  | 4032 | ret = copy_from_user(exec_list, | 
|  | 4033 | (struct drm_i915_relocation_entry __user *) | 
|  | 4034 | (uintptr_t) args->buffers_ptr, | 
|  | 4035 | sizeof(*exec_list) * args->buffer_count); | 
|  | 4036 | if (ret != 0) { | 
|  | 4037 | DRM_ERROR("copy %d exec entries failed %d\n", | 
|  | 4038 | args->buffer_count, ret); | 
|  | 4039 | drm_free_large(exec_list); | 
|  | 4040 | drm_free_large(exec2_list); | 
|  | 4041 | return -EFAULT; | 
|  | 4042 | } | 
|  | 4043 |  | 
|  | 4044 | for (i = 0; i < args->buffer_count; i++) { | 
|  | 4045 | exec2_list[i].handle = exec_list[i].handle; | 
|  | 4046 | exec2_list[i].relocation_count = exec_list[i].relocation_count; | 
|  | 4047 | exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr; | 
|  | 4048 | exec2_list[i].alignment = exec_list[i].alignment; | 
|  | 4049 | exec2_list[i].offset = exec_list[i].offset; | 
| Chris Wilson | a6c45cf | 2010-09-17 00:32:17 +0100 | [diff] [blame] | 4050 | if (INTEL_INFO(dev)->gen < 4) | 
| Jesse Barnes | 76446ca | 2009-12-17 22:05:42 -0500 | [diff] [blame] | 4051 | exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE; | 
|  | 4052 | else | 
|  | 4053 | exec2_list[i].flags = 0; | 
|  | 4054 | } | 
|  | 4055 |  | 
|  | 4056 | exec2.buffers_ptr = args->buffers_ptr; | 
|  | 4057 | exec2.buffer_count = args->buffer_count; | 
|  | 4058 | exec2.batch_start_offset = args->batch_start_offset; | 
|  | 4059 | exec2.batch_len = args->batch_len; | 
|  | 4060 | exec2.DR1 = args->DR1; | 
|  | 4061 | exec2.DR4 = args->DR4; | 
|  | 4062 | exec2.num_cliprects = args->num_cliprects; | 
|  | 4063 | exec2.cliprects_ptr = args->cliprects_ptr; | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 4064 | exec2.flags = I915_EXEC_RENDER; | 
| Jesse Barnes | 76446ca | 2009-12-17 22:05:42 -0500 | [diff] [blame] | 4065 |  | 
|  | 4066 | ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list); | 
|  | 4067 | if (!ret) { | 
|  | 4068 | /* Copy the new buffer offsets back to the user's exec list. */ | 
|  | 4069 | for (i = 0; i < args->buffer_count; i++) | 
|  | 4070 | exec_list[i].offset = exec2_list[i].offset; | 
|  | 4071 | /* ... and back out to userspace */ | 
|  | 4072 | ret = copy_to_user((struct drm_i915_relocation_entry __user *) | 
|  | 4073 | (uintptr_t) args->buffers_ptr, | 
|  | 4074 | exec_list, | 
|  | 4075 | sizeof(*exec_list) * args->buffer_count); | 
|  | 4076 | if (ret) { | 
|  | 4077 | ret = -EFAULT; | 
|  | 4078 | DRM_ERROR("failed to copy %d exec entries " | 
|  | 4079 | "back to user (%d)\n", | 
|  | 4080 | args->buffer_count, ret); | 
|  | 4081 | } | 
| Jesse Barnes | 76446ca | 2009-12-17 22:05:42 -0500 | [diff] [blame] | 4082 | } | 
|  | 4083 |  | 
|  | 4084 | drm_free_large(exec_list); | 
|  | 4085 | drm_free_large(exec2_list); | 
|  | 4086 | return ret; | 
|  | 4087 | } | 
|  | 4088 |  | 
|  | 4089 | int | 
|  | 4090 | i915_gem_execbuffer2(struct drm_device *dev, void *data, | 
|  | 4091 | struct drm_file *file_priv) | 
|  | 4092 | { | 
|  | 4093 | struct drm_i915_gem_execbuffer2 *args = data; | 
|  | 4094 | struct drm_i915_gem_exec_object2 *exec2_list = NULL; | 
|  | 4095 | int ret; | 
|  | 4096 |  | 
|  | 4097 | #if WATCH_EXEC | 
|  | 4098 | DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", | 
|  | 4099 | (int) args->buffers_ptr, args->buffer_count, args->batch_len); | 
|  | 4100 | #endif | 
|  | 4101 |  | 
|  | 4102 | if (args->buffer_count < 1) { | 
|  | 4103 | DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count); | 
|  | 4104 | return -EINVAL; | 
|  | 4105 | } | 
|  | 4106 |  | 
|  | 4107 | exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); | 
|  | 4108 | if (exec2_list == NULL) { | 
|  | 4109 | DRM_ERROR("Failed to allocate exec list for %d buffers\n", | 
|  | 4110 | args->buffer_count); | 
|  | 4111 | return -ENOMEM; | 
|  | 4112 | } | 
|  | 4113 | ret = copy_from_user(exec2_list, | 
|  | 4114 | (struct drm_i915_relocation_entry __user *) | 
|  | 4115 | (uintptr_t) args->buffers_ptr, | 
|  | 4116 | sizeof(*exec2_list) * args->buffer_count); | 
|  | 4117 | if (ret != 0) { | 
|  | 4118 | DRM_ERROR("copy %d exec entries failed %d\n", | 
|  | 4119 | args->buffer_count, ret); | 
|  | 4120 | drm_free_large(exec2_list); | 
|  | 4121 | return -EFAULT; | 
|  | 4122 | } | 
|  | 4123 |  | 
|  | 4124 | ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list); | 
|  | 4125 | if (!ret) { | 
|  | 4126 | /* Copy the new buffer offsets back to the user's exec list. */ | 
|  | 4127 | ret = copy_to_user((struct drm_i915_relocation_entry __user *) | 
|  | 4128 | (uintptr_t) args->buffers_ptr, | 
|  | 4129 | exec2_list, | 
|  | 4130 | sizeof(*exec2_list) * args->buffer_count); | 
|  | 4131 | if (ret) { | 
|  | 4132 | ret = -EFAULT; | 
|  | 4133 | DRM_ERROR("failed to copy %d exec entries " | 
|  | 4134 | "back to user (%d)\n", | 
|  | 4135 | args->buffer_count, ret); | 
|  | 4136 | } | 
|  | 4137 | } | 
|  | 4138 |  | 
|  | 4139 | drm_free_large(exec2_list); | 
|  | 4140 | return ret; | 
|  | 4141 | } | 
|  | 4142 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4143 | int | 
| Daniel Vetter | 920afa7 | 2010-09-16 17:54:23 +0200 | [diff] [blame] | 4144 | i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment, | 
| Daniel Vetter | 75e9e91 | 2010-11-04 17:11:09 +0100 | [diff] [blame^] | 4145 | bool map_and_fenceable) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4146 | { | 
|  | 4147 | struct drm_device *dev = obj->dev; | 
| Chris Wilson | f13d3f7 | 2010-09-20 17:36:15 +0100 | [diff] [blame] | 4148 | struct drm_i915_private *dev_priv = dev->dev_private; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 4149 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4150 | int ret; | 
|  | 4151 |  | 
| Daniel Vetter | 778c354 | 2010-05-13 11:49:44 +0200 | [diff] [blame] | 4152 | BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); | 
| Daniel Vetter | 75e9e91 | 2010-11-04 17:11:09 +0100 | [diff] [blame^] | 4153 | BUG_ON(map_and_fenceable && !map_and_fenceable); | 
| Chris Wilson | 23bc598 | 2010-09-29 16:10:57 +0100 | [diff] [blame] | 4154 | WARN_ON(i915_verify_lists(dev)); | 
| Chris Wilson | ac0c6b5 | 2010-05-27 13:18:18 +0100 | [diff] [blame] | 4155 |  | 
|  | 4156 | if (obj_priv->gtt_space != NULL) { | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 4157 | if ((alignment && obj_priv->gtt_offset & (alignment - 1)) || | 
| Daniel Vetter | 75e9e91 | 2010-11-04 17:11:09 +0100 | [diff] [blame^] | 4158 | (map_and_fenceable && !obj_priv->map_and_fenceable)) { | 
| Chris Wilson | ae7d49d | 2010-08-04 12:37:41 +0100 | [diff] [blame] | 4159 | WARN(obj_priv->pin_count, | 
|  | 4160 | "bo is already pinned with incorrect alignment:" | 
| Daniel Vetter | 75e9e91 | 2010-11-04 17:11:09 +0100 | [diff] [blame^] | 4161 | " offset=%x, req.alignment=%x, req.map_and_fenceable=%d," | 
|  | 4162 | " obj->map_and_fenceable=%d\n", | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 4163 | obj_priv->gtt_offset, alignment, | 
| Daniel Vetter | 75e9e91 | 2010-11-04 17:11:09 +0100 | [diff] [blame^] | 4164 | map_and_fenceable, | 
|  | 4165 | obj_priv->map_and_fenceable); | 
| Chris Wilson | ac0c6b5 | 2010-05-27 13:18:18 +0100 | [diff] [blame] | 4166 | ret = i915_gem_object_unbind(obj); | 
|  | 4167 | if (ret) | 
|  | 4168 | return ret; | 
|  | 4169 | } | 
|  | 4170 | } | 
|  | 4171 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4172 | if (obj_priv->gtt_space == NULL) { | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 4173 | ret = i915_gem_object_bind_to_gtt(obj, alignment, | 
| Daniel Vetter | 75e9e91 | 2010-11-04 17:11:09 +0100 | [diff] [blame^] | 4174 | map_and_fenceable); | 
| Chris Wilson | 9731129 | 2009-09-21 00:22:34 +0100 | [diff] [blame] | 4175 | if (ret) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4176 | return ret; | 
| Chris Wilson | 22c344e | 2009-02-11 14:26:45 +0000 | [diff] [blame] | 4177 | } | 
| Jesse Barnes | 76446ca | 2009-12-17 22:05:42 -0500 | [diff] [blame] | 4178 |  | 
| Chris Wilson | 7465378 | 2010-10-29 10:41:23 +0100 | [diff] [blame] | 4179 | if (obj_priv->pin_count++ == 0) { | 
| Daniel Vetter | 75e9e91 | 2010-11-04 17:11:09 +0100 | [diff] [blame^] | 4180 | i915_gem_info_add_pin(dev_priv, obj_priv, map_and_fenceable); | 
| Chris Wilson | f13d3f7 | 2010-09-20 17:36:15 +0100 | [diff] [blame] | 4181 | if (!obj_priv->active) | 
| Chris Wilson | 69dc498 | 2010-10-19 10:36:51 +0100 | [diff] [blame] | 4182 | list_move_tail(&obj_priv->mm_list, | 
| Chris Wilson | f13d3f7 | 2010-09-20 17:36:15 +0100 | [diff] [blame] | 4183 | &dev_priv->mm.pinned_list); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4184 | } | 
| Daniel Vetter | 75e9e91 | 2010-11-04 17:11:09 +0100 | [diff] [blame^] | 4185 | BUG_ON(!obj_priv->pin_mappable && map_and_fenceable); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4186 |  | 
| Chris Wilson | 23bc598 | 2010-09-29 16:10:57 +0100 | [diff] [blame] | 4187 | WARN_ON(i915_verify_lists(dev)); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4188 | return 0; | 
|  | 4189 | } | 
|  | 4190 |  | 
|  | 4191 | void | 
|  | 4192 | i915_gem_object_unpin(struct drm_gem_object *obj) | 
|  | 4193 | { | 
|  | 4194 | struct drm_device *dev = obj->dev; | 
|  | 4195 | drm_i915_private_t *dev_priv = dev->dev_private; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 4196 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4197 |  | 
| Chris Wilson | 23bc598 | 2010-09-29 16:10:57 +0100 | [diff] [blame] | 4198 | WARN_ON(i915_verify_lists(dev)); | 
| Chris Wilson | 7465378 | 2010-10-29 10:41:23 +0100 | [diff] [blame] | 4199 | BUG_ON(obj_priv->pin_count == 0); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4200 | BUG_ON(obj_priv->gtt_space == NULL); | 
|  | 4201 |  | 
| Chris Wilson | 7465378 | 2010-10-29 10:41:23 +0100 | [diff] [blame] | 4202 | if (--obj_priv->pin_count == 0) { | 
| Chris Wilson | f13d3f7 | 2010-09-20 17:36:15 +0100 | [diff] [blame] | 4203 | if (!obj_priv->active) | 
| Chris Wilson | 69dc498 | 2010-10-19 10:36:51 +0100 | [diff] [blame] | 4204 | list_move_tail(&obj_priv->mm_list, | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4205 | &dev_priv->mm.inactive_list); | 
| Chris Wilson | a00b10c | 2010-09-24 21:15:47 +0100 | [diff] [blame] | 4206 | i915_gem_info_remove_pin(dev_priv, obj_priv); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4207 | } | 
| Chris Wilson | 23bc598 | 2010-09-29 16:10:57 +0100 | [diff] [blame] | 4208 | WARN_ON(i915_verify_lists(dev)); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4209 | } | 
|  | 4210 |  | 
|  | 4211 | int | 
|  | 4212 | i915_gem_pin_ioctl(struct drm_device *dev, void *data, | 
|  | 4213 | struct drm_file *file_priv) | 
|  | 4214 | { | 
|  | 4215 | struct drm_i915_gem_pin *args = data; | 
|  | 4216 | struct drm_gem_object *obj; | 
|  | 4217 | struct drm_i915_gem_object *obj_priv; | 
|  | 4218 | int ret; | 
|  | 4219 |  | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 4220 | ret = i915_mutex_lock_interruptible(dev); | 
|  | 4221 | if (ret) | 
|  | 4222 | return ret; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4223 |  | 
|  | 4224 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 
|  | 4225 | if (obj == NULL) { | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 4226 | ret = -ENOENT; | 
|  | 4227 | goto unlock; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4228 | } | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 4229 | obj_priv = to_intel_bo(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4230 |  | 
| Chris Wilson | bb6baf7 | 2009-09-22 14:24:13 +0100 | [diff] [blame] | 4231 | if (obj_priv->madv != I915_MADV_WILLNEED) { | 
|  | 4232 | DRM_ERROR("Attempting to pin a purgeable buffer\n"); | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 4233 | ret = -EINVAL; | 
|  | 4234 | goto out; | 
| Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 4235 | } | 
|  | 4236 |  | 
| Jesse Barnes | 79e5394 | 2008-11-07 14:24:08 -0800 | [diff] [blame] | 4237 | if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) { | 
|  | 4238 | DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", | 
|  | 4239 | args->handle); | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 4240 | ret = -EINVAL; | 
|  | 4241 | goto out; | 
| Jesse Barnes | 79e5394 | 2008-11-07 14:24:08 -0800 | [diff] [blame] | 4242 | } | 
|  | 4243 |  | 
|  | 4244 | obj_priv->user_pin_count++; | 
|  | 4245 | obj_priv->pin_filp = file_priv; | 
|  | 4246 | if (obj_priv->user_pin_count == 1) { | 
| Daniel Vetter | 75e9e91 | 2010-11-04 17:11:09 +0100 | [diff] [blame^] | 4247 | ret = i915_gem_object_pin(obj, args->alignment, true); | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 4248 | if (ret) | 
|  | 4249 | goto out; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4250 | } | 
|  | 4251 |  | 
|  | 4252 | /* XXX - flush the CPU caches for pinned objects | 
|  | 4253 | * as the X server doesn't manage domains yet | 
|  | 4254 | */ | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 4255 | i915_gem_object_flush_cpu_write_domain(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4256 | args->offset = obj_priv->gtt_offset; | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 4257 | out: | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4258 | drm_gem_object_unreference(obj); | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 4259 | unlock: | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4260 | mutex_unlock(&dev->struct_mutex); | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 4261 | return ret; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4262 | } | 
|  | 4263 |  | 
|  | 4264 | int | 
|  | 4265 | i915_gem_unpin_ioctl(struct drm_device *dev, void *data, | 
|  | 4266 | struct drm_file *file_priv) | 
|  | 4267 | { | 
|  | 4268 | struct drm_i915_gem_pin *args = data; | 
|  | 4269 | struct drm_gem_object *obj; | 
| Jesse Barnes | 79e5394 | 2008-11-07 14:24:08 -0800 | [diff] [blame] | 4270 | struct drm_i915_gem_object *obj_priv; | 
| Chris Wilson | 76c1dec | 2010-09-25 11:22:51 +0100 | [diff] [blame] | 4271 | int ret; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4272 |  | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 4273 | ret = i915_mutex_lock_interruptible(dev); | 
|  | 4274 | if (ret) | 
|  | 4275 | return ret; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4276 |  | 
|  | 4277 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 
|  | 4278 | if (obj == NULL) { | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 4279 | ret = -ENOENT; | 
|  | 4280 | goto unlock; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4281 | } | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 4282 | obj_priv = to_intel_bo(obj); | 
| Chris Wilson | 76c1dec | 2010-09-25 11:22:51 +0100 | [diff] [blame] | 4283 |  | 
| Jesse Barnes | 79e5394 | 2008-11-07 14:24:08 -0800 | [diff] [blame] | 4284 | if (obj_priv->pin_filp != file_priv) { | 
|  | 4285 | DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", | 
|  | 4286 | args->handle); | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 4287 | ret = -EINVAL; | 
|  | 4288 | goto out; | 
| Jesse Barnes | 79e5394 | 2008-11-07 14:24:08 -0800 | [diff] [blame] | 4289 | } | 
|  | 4290 | obj_priv->user_pin_count--; | 
|  | 4291 | if (obj_priv->user_pin_count == 0) { | 
|  | 4292 | obj_priv->pin_filp = NULL; | 
|  | 4293 | i915_gem_object_unpin(obj); | 
|  | 4294 | } | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4295 |  | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 4296 | out: | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4297 | drm_gem_object_unreference(obj); | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 4298 | unlock: | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4299 | mutex_unlock(&dev->struct_mutex); | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 4300 | return ret; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4301 | } | 
|  | 4302 |  | 
|  | 4303 | int | 
|  | 4304 | i915_gem_busy_ioctl(struct drm_device *dev, void *data, | 
|  | 4305 | struct drm_file *file_priv) | 
|  | 4306 | { | 
|  | 4307 | struct drm_i915_gem_busy *args = data; | 
|  | 4308 | struct drm_gem_object *obj; | 
|  | 4309 | struct drm_i915_gem_object *obj_priv; | 
| Chris Wilson | 30dbf0c | 2010-09-25 10:19:17 +0100 | [diff] [blame] | 4310 | int ret; | 
|  | 4311 |  | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 4312 | ret = i915_mutex_lock_interruptible(dev); | 
|  | 4313 | if (ret) | 
|  | 4314 | return ret; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4315 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4316 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 
|  | 4317 | if (obj == NULL) { | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 4318 | ret = -ENOENT; | 
|  | 4319 | goto unlock; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4320 | } | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 4321 | obj_priv = to_intel_bo(obj); | 
| Zou Nan hai | d1b851f | 2010-05-21 09:08:57 +0800 | [diff] [blame] | 4322 |  | 
| Chris Wilson | 0be555b | 2010-08-04 15:36:30 +0100 | [diff] [blame] | 4323 | /* Count all active objects as busy, even if they are currently not used | 
|  | 4324 | * by the gpu. Users of this interface expect objects to eventually | 
|  | 4325 | * become non-busy without any further actions, therefore emit any | 
|  | 4326 | * necessary flushes here. | 
| Eric Anholt | c4de0a5 | 2008-12-14 19:05:04 -0800 | [diff] [blame] | 4327 | */ | 
| Chris Wilson | 0be555b | 2010-08-04 15:36:30 +0100 | [diff] [blame] | 4328 | args->busy = obj_priv->active; | 
|  | 4329 | if (args->busy) { | 
|  | 4330 | /* Unconditionally flush objects, even when the gpu still uses this | 
|  | 4331 | * object. Userspace calling this function indicates that it wants to | 
|  | 4332 | * use this buffer rather sooner than later, so issuing the required | 
|  | 4333 | * flush earlier is beneficial. | 
|  | 4334 | */ | 
| Chris Wilson | c78ec30 | 2010-09-20 12:50:23 +0100 | [diff] [blame] | 4335 | if (obj->write_domain & I915_GEM_GPU_DOMAINS) | 
|  | 4336 | i915_gem_flush_ring(dev, file_priv, | 
| Chris Wilson | 9220434 | 2010-09-18 11:02:01 +0100 | [diff] [blame] | 4337 | obj_priv->ring, | 
|  | 4338 | 0, obj->write_domain); | 
| Chris Wilson | 0be555b | 2010-08-04 15:36:30 +0100 | [diff] [blame] | 4339 |  | 
|  | 4340 | /* Update the active list for the hardware's current position. | 
|  | 4341 | * Otherwise this only updates on a delayed timer or when irqs | 
|  | 4342 | * are actually unmasked, and our working set ends up being | 
|  | 4343 | * larger than required. | 
|  | 4344 | */ | 
|  | 4345 | i915_gem_retire_requests_ring(dev, obj_priv->ring); | 
|  | 4346 |  | 
|  | 4347 | args->busy = obj_priv->active; | 
|  | 4348 | } | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4349 |  | 
|  | 4350 | drm_gem_object_unreference(obj); | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 4351 | unlock: | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4352 | mutex_unlock(&dev->struct_mutex); | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 4353 | return ret; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4354 | } | 
|  | 4355 |  | 
|  | 4356 | int | 
|  | 4357 | i915_gem_throttle_ioctl(struct drm_device *dev, void *data, | 
|  | 4358 | struct drm_file *file_priv) | 
|  | 4359 | { | 
|  | 4360 | return i915_gem_ring_throttle(dev, file_priv); | 
|  | 4361 | } | 
|  | 4362 |  | 
| Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 4363 | int | 
|  | 4364 | i915_gem_madvise_ioctl(struct drm_device *dev, void *data, | 
|  | 4365 | struct drm_file *file_priv) | 
|  | 4366 | { | 
|  | 4367 | struct drm_i915_gem_madvise *args = data; | 
|  | 4368 | struct drm_gem_object *obj; | 
|  | 4369 | struct drm_i915_gem_object *obj_priv; | 
| Chris Wilson | 76c1dec | 2010-09-25 11:22:51 +0100 | [diff] [blame] | 4370 | int ret; | 
| Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 4371 |  | 
|  | 4372 | switch (args->madv) { | 
|  | 4373 | case I915_MADV_DONTNEED: | 
|  | 4374 | case I915_MADV_WILLNEED: | 
|  | 4375 | break; | 
|  | 4376 | default: | 
|  | 4377 | return -EINVAL; | 
|  | 4378 | } | 
|  | 4379 |  | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 4380 | ret = i915_mutex_lock_interruptible(dev); | 
|  | 4381 | if (ret) | 
|  | 4382 | return ret; | 
|  | 4383 |  | 
| Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 4384 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 
|  | 4385 | if (obj == NULL) { | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 4386 | ret = -ENOENT; | 
|  | 4387 | goto unlock; | 
| Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 4388 | } | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 4389 | obj_priv = to_intel_bo(obj); | 
| Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 4390 |  | 
|  | 4391 | if (obj_priv->pin_count) { | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 4392 | ret = -EINVAL; | 
|  | 4393 | goto out; | 
| Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 4394 | } | 
|  | 4395 |  | 
| Chris Wilson | bb6baf7 | 2009-09-22 14:24:13 +0100 | [diff] [blame] | 4396 | if (obj_priv->madv != __I915_MADV_PURGED) | 
|  | 4397 | obj_priv->madv = args->madv; | 
| Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 4398 |  | 
| Chris Wilson | 2d7ef39 | 2009-09-20 23:13:10 +0100 | [diff] [blame] | 4399 | /* if the object is no longer bound, discard its backing storage */ | 
|  | 4400 | if (i915_gem_object_is_purgeable(obj_priv) && | 
|  | 4401 | obj_priv->gtt_space == NULL) | 
|  | 4402 | i915_gem_object_truncate(obj); | 
|  | 4403 |  | 
| Chris Wilson | bb6baf7 | 2009-09-22 14:24:13 +0100 | [diff] [blame] | 4404 | args->retained = obj_priv->madv != __I915_MADV_PURGED; | 
|  | 4405 |  | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 4406 | out: | 
| Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 4407 | drm_gem_object_unreference(obj); | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 4408 | unlock: | 
| Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 4409 | mutex_unlock(&dev->struct_mutex); | 
| Chris Wilson | 1d7cfea | 2010-10-17 09:45:41 +0100 | [diff] [blame] | 4410 | return ret; | 
| Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 4411 | } | 
|  | 4412 |  | 
| Daniel Vetter | ac52bc5 | 2010-04-09 19:05:06 +0000 | [diff] [blame] | 4413 | struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, | 
|  | 4414 | size_t size) | 
|  | 4415 | { | 
| Chris Wilson | 73aa808 | 2010-09-30 11:46:12 +0100 | [diff] [blame] | 4416 | struct drm_i915_private *dev_priv = dev->dev_private; | 
| Daniel Vetter | c397b90 | 2010-04-09 19:05:07 +0000 | [diff] [blame] | 4417 | struct drm_i915_gem_object *obj; | 
|  | 4418 |  | 
|  | 4419 | obj = kzalloc(sizeof(*obj), GFP_KERNEL); | 
|  | 4420 | if (obj == NULL) | 
|  | 4421 | return NULL; | 
|  | 4422 |  | 
|  | 4423 | if (drm_gem_object_init(dev, &obj->base, size) != 0) { | 
|  | 4424 | kfree(obj); | 
|  | 4425 | return NULL; | 
|  | 4426 | } | 
|  | 4427 |  | 
| Chris Wilson | 73aa808 | 2010-09-30 11:46:12 +0100 | [diff] [blame] | 4428 | i915_gem_info_add_obj(dev_priv, size); | 
|  | 4429 |  | 
| Daniel Vetter | c397b90 | 2010-04-09 19:05:07 +0000 | [diff] [blame] | 4430 | obj->base.write_domain = I915_GEM_DOMAIN_CPU; | 
|  | 4431 | obj->base.read_domains = I915_GEM_DOMAIN_CPU; | 
|  | 4432 |  | 
|  | 4433 | obj->agp_type = AGP_USER_MEMORY; | 
| Daniel Vetter | 62b8b21 | 2010-04-09 19:05:08 +0000 | [diff] [blame] | 4434 | obj->base.driver_private = NULL; | 
| Daniel Vetter | c397b90 | 2010-04-09 19:05:07 +0000 | [diff] [blame] | 4435 | obj->fence_reg = I915_FENCE_REG_NONE; | 
| Chris Wilson | 69dc498 | 2010-10-19 10:36:51 +0100 | [diff] [blame] | 4436 | INIT_LIST_HEAD(&obj->mm_list); | 
|  | 4437 | INIT_LIST_HEAD(&obj->ring_list); | 
| Daniel Vetter | c397b90 | 2010-04-09 19:05:07 +0000 | [diff] [blame] | 4438 | INIT_LIST_HEAD(&obj->gpu_write_list); | 
| Daniel Vetter | c397b90 | 2010-04-09 19:05:07 +0000 | [diff] [blame] | 4439 | obj->madv = I915_MADV_WILLNEED; | 
| Daniel Vetter | 75e9e91 | 2010-11-04 17:11:09 +0100 | [diff] [blame^] | 4440 | /* Avoid an unnecessary call to unbind on the first bind. */ | 
|  | 4441 | obj->map_and_fenceable = true; | 
| Daniel Vetter | c397b90 | 2010-04-09 19:05:07 +0000 | [diff] [blame] | 4442 |  | 
| Daniel Vetter | c397b90 | 2010-04-09 19:05:07 +0000 | [diff] [blame] | 4443 | return &obj->base; | 
| Daniel Vetter | ac52bc5 | 2010-04-09 19:05:06 +0000 | [diff] [blame] | 4444 | } | 
|  | 4445 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4446 | int i915_gem_init_object(struct drm_gem_object *obj) | 
|  | 4447 | { | 
| Daniel Vetter | c397b90 | 2010-04-09 19:05:07 +0000 | [diff] [blame] | 4448 | BUG(); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 4449 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4450 | return 0; | 
|  | 4451 | } | 
|  | 4452 |  | 
| Chris Wilson | be72615 | 2010-07-23 23:18:50 +0100 | [diff] [blame] | 4453 | static void i915_gem_free_object_tail(struct drm_gem_object *obj) | 
|  | 4454 | { | 
|  | 4455 | struct drm_device *dev = obj->dev; | 
|  | 4456 | drm_i915_private_t *dev_priv = dev->dev_private; | 
|  | 4457 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
|  | 4458 | int ret; | 
|  | 4459 |  | 
|  | 4460 | ret = i915_gem_object_unbind(obj); | 
|  | 4461 | if (ret == -ERESTARTSYS) { | 
| Chris Wilson | 69dc498 | 2010-10-19 10:36:51 +0100 | [diff] [blame] | 4462 | list_move(&obj_priv->mm_list, | 
| Chris Wilson | be72615 | 2010-07-23 23:18:50 +0100 | [diff] [blame] | 4463 | &dev_priv->mm.deferred_free_list); | 
|  | 4464 | return; | 
|  | 4465 | } | 
|  | 4466 |  | 
| Chris Wilson | 39a01d1 | 2010-10-28 13:03:06 +0100 | [diff] [blame] | 4467 | if (obj->map_list.map) | 
| Chris Wilson | be72615 | 2010-07-23 23:18:50 +0100 | [diff] [blame] | 4468 | i915_gem_free_mmap_offset(obj); | 
|  | 4469 |  | 
|  | 4470 | drm_gem_object_release(obj); | 
| Chris Wilson | 73aa808 | 2010-09-30 11:46:12 +0100 | [diff] [blame] | 4471 | i915_gem_info_remove_obj(dev_priv, obj->size); | 
| Chris Wilson | be72615 | 2010-07-23 23:18:50 +0100 | [diff] [blame] | 4472 |  | 
|  | 4473 | kfree(obj_priv->page_cpu_valid); | 
|  | 4474 | kfree(obj_priv->bit_17); | 
|  | 4475 | kfree(obj_priv); | 
|  | 4476 | } | 
|  | 4477 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4478 | void i915_gem_free_object(struct drm_gem_object *obj) | 
|  | 4479 | { | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 4480 | struct drm_device *dev = obj->dev; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 4481 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4482 |  | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 4483 | trace_i915_gem_object_destroy(obj); | 
|  | 4484 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4485 | while (obj_priv->pin_count > 0) | 
|  | 4486 | i915_gem_object_unpin(obj); | 
|  | 4487 |  | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4488 | if (obj_priv->phys_obj) | 
|  | 4489 | i915_gem_detach_phys_object(dev, obj); | 
|  | 4490 |  | 
| Chris Wilson | be72615 | 2010-07-23 23:18:50 +0100 | [diff] [blame] | 4491 | i915_gem_free_object_tail(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4492 | } | 
|  | 4493 |  | 
| Jesse Barnes | 5669fca | 2009-02-17 15:13:31 -0800 | [diff] [blame] | 4494 | int | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4495 | i915_gem_idle(struct drm_device *dev) | 
|  | 4496 | { | 
|  | 4497 | drm_i915_private_t *dev_priv = dev->dev_private; | 
| Chris Wilson | 29105cc | 2010-01-07 10:39:13 +0000 | [diff] [blame] | 4498 | int ret; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4499 |  | 
| Keith Packard | 6dbe277 | 2008-10-14 21:41:13 -0700 | [diff] [blame] | 4500 | mutex_lock(&dev->struct_mutex); | 
|  | 4501 |  | 
| Chris Wilson | 87acb0a | 2010-10-19 10:13:00 +0100 | [diff] [blame] | 4502 | if (dev_priv->mm.suspended) { | 
| Keith Packard | 6dbe277 | 2008-10-14 21:41:13 -0700 | [diff] [blame] | 4503 | mutex_unlock(&dev->struct_mutex); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4504 | return 0; | 
| Keith Packard | 6dbe277 | 2008-10-14 21:41:13 -0700 | [diff] [blame] | 4505 | } | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4506 |  | 
| Chris Wilson | 29105cc | 2010-01-07 10:39:13 +0000 | [diff] [blame] | 4507 | ret = i915_gpu_idle(dev); | 
| Keith Packard | 6dbe277 | 2008-10-14 21:41:13 -0700 | [diff] [blame] | 4508 | if (ret) { | 
|  | 4509 | mutex_unlock(&dev->struct_mutex); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4510 | return ret; | 
| Keith Packard | 6dbe277 | 2008-10-14 21:41:13 -0700 | [diff] [blame] | 4511 | } | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4512 |  | 
| Chris Wilson | 29105cc | 2010-01-07 10:39:13 +0000 | [diff] [blame] | 4513 | /* Under UMS, be paranoid and evict. */ | 
|  | 4514 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) { | 
| Chris Wilson | 5eac3ab | 2010-10-31 08:49:47 +0000 | [diff] [blame] | 4515 | ret = i915_gem_evict_inactive(dev, false); | 
| Chris Wilson | 29105cc | 2010-01-07 10:39:13 +0000 | [diff] [blame] | 4516 | if (ret) { | 
|  | 4517 | mutex_unlock(&dev->struct_mutex); | 
|  | 4518 | return ret; | 
|  | 4519 | } | 
|  | 4520 | } | 
|  | 4521 |  | 
|  | 4522 | /* Hack!  Don't let anybody do execbuf while we don't control the chip. | 
|  | 4523 | * We need to replace this with a semaphore, or something. | 
|  | 4524 | * And not confound mm.suspended! | 
|  | 4525 | */ | 
|  | 4526 | dev_priv->mm.suspended = 1; | 
| Daniel Vetter | bc0c7f1 | 2010-08-20 18:18:48 +0200 | [diff] [blame] | 4527 | del_timer_sync(&dev_priv->hangcheck_timer); | 
| Chris Wilson | 29105cc | 2010-01-07 10:39:13 +0000 | [diff] [blame] | 4528 |  | 
|  | 4529 | i915_kernel_lost_context(dev); | 
| Keith Packard | 6dbe277 | 2008-10-14 21:41:13 -0700 | [diff] [blame] | 4530 | i915_gem_cleanup_ringbuffer(dev); | 
| Chris Wilson | 29105cc | 2010-01-07 10:39:13 +0000 | [diff] [blame] | 4531 |  | 
| Keith Packard | 6dbe277 | 2008-10-14 21:41:13 -0700 | [diff] [blame] | 4532 | mutex_unlock(&dev->struct_mutex); | 
|  | 4533 |  | 
| Chris Wilson | 29105cc | 2010-01-07 10:39:13 +0000 | [diff] [blame] | 4534 | /* Cancel the retire work handler, which should be idle now. */ | 
|  | 4535 | cancel_delayed_work_sync(&dev_priv->mm.retire_work); | 
|  | 4536 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4537 | return 0; | 
|  | 4538 | } | 
|  | 4539 |  | 
| Jesse Barnes | e552eb7 | 2010-04-21 11:39:23 -0700 | [diff] [blame] | 4540 | /* | 
|  | 4541 | * 965+ support PIPE_CONTROL commands, which provide finer grained control | 
|  | 4542 | * over cache flushing. | 
|  | 4543 | */ | 
| Zou Nan hai | 8187a2b | 2010-05-21 09:08:55 +0800 | [diff] [blame] | 4544 | static int | 
| Jesse Barnes | e552eb7 | 2010-04-21 11:39:23 -0700 | [diff] [blame] | 4545 | i915_gem_init_pipe_control(struct drm_device *dev) | 
|  | 4546 | { | 
|  | 4547 | drm_i915_private_t *dev_priv = dev->dev_private; | 
|  | 4548 | struct drm_gem_object *obj; | 
|  | 4549 | struct drm_i915_gem_object *obj_priv; | 
|  | 4550 | int ret; | 
|  | 4551 |  | 
| Eric Anholt | 34dc4d4 | 2010-05-07 14:30:03 -0700 | [diff] [blame] | 4552 | obj = i915_gem_alloc_object(dev, 4096); | 
| Jesse Barnes | e552eb7 | 2010-04-21 11:39:23 -0700 | [diff] [blame] | 4553 | if (obj == NULL) { | 
|  | 4554 | DRM_ERROR("Failed to allocate seqno page\n"); | 
|  | 4555 | ret = -ENOMEM; | 
|  | 4556 | goto err; | 
|  | 4557 | } | 
|  | 4558 | obj_priv = to_intel_bo(obj); | 
|  | 4559 | obj_priv->agp_type = AGP_USER_CACHED_MEMORY; | 
|  | 4560 |  | 
| Daniel Vetter | 75e9e91 | 2010-11-04 17:11:09 +0100 | [diff] [blame^] | 4561 | ret = i915_gem_object_pin(obj, 4096, true); | 
| Jesse Barnes | e552eb7 | 2010-04-21 11:39:23 -0700 | [diff] [blame] | 4562 | if (ret) | 
|  | 4563 | goto err_unref; | 
|  | 4564 |  | 
|  | 4565 | dev_priv->seqno_gfx_addr = obj_priv->gtt_offset; | 
|  | 4566 | dev_priv->seqno_page =  kmap(obj_priv->pages[0]); | 
|  | 4567 | if (dev_priv->seqno_page == NULL) | 
|  | 4568 | goto err_unpin; | 
|  | 4569 |  | 
|  | 4570 | dev_priv->seqno_obj = obj; | 
|  | 4571 | memset(dev_priv->seqno_page, 0, PAGE_SIZE); | 
|  | 4572 |  | 
|  | 4573 | return 0; | 
|  | 4574 |  | 
|  | 4575 | err_unpin: | 
|  | 4576 | i915_gem_object_unpin(obj); | 
|  | 4577 | err_unref: | 
|  | 4578 | drm_gem_object_unreference(obj); | 
|  | 4579 | err: | 
|  | 4580 | return ret; | 
|  | 4581 | } | 
|  | 4582 |  | 
| Zou Nan hai | 8187a2b | 2010-05-21 09:08:55 +0800 | [diff] [blame] | 4583 |  | 
|  | 4584 | static void | 
| Jesse Barnes | e552eb7 | 2010-04-21 11:39:23 -0700 | [diff] [blame] | 4585 | i915_gem_cleanup_pipe_control(struct drm_device *dev) | 
|  | 4586 | { | 
|  | 4587 | drm_i915_private_t *dev_priv = dev->dev_private; | 
|  | 4588 | struct drm_gem_object *obj; | 
|  | 4589 | struct drm_i915_gem_object *obj_priv; | 
|  | 4590 |  | 
|  | 4591 | obj = dev_priv->seqno_obj; | 
|  | 4592 | obj_priv = to_intel_bo(obj); | 
|  | 4593 | kunmap(obj_priv->pages[0]); | 
|  | 4594 | i915_gem_object_unpin(obj); | 
|  | 4595 | drm_gem_object_unreference(obj); | 
|  | 4596 | dev_priv->seqno_obj = NULL; | 
|  | 4597 |  | 
|  | 4598 | dev_priv->seqno_page = NULL; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4599 | } | 
|  | 4600 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4601 | int | 
| Zou Nan hai | 8187a2b | 2010-05-21 09:08:55 +0800 | [diff] [blame] | 4602 | i915_gem_init_ringbuffer(struct drm_device *dev) | 
|  | 4603 | { | 
|  | 4604 | drm_i915_private_t *dev_priv = dev->dev_private; | 
|  | 4605 | int ret; | 
| Chris Wilson | 68f95ba | 2010-05-27 13:18:22 +0100 | [diff] [blame] | 4606 |  | 
| Zou Nan hai | 8187a2b | 2010-05-21 09:08:55 +0800 | [diff] [blame] | 4607 | if (HAS_PIPE_CONTROL(dev)) { | 
|  | 4608 | ret = i915_gem_init_pipe_control(dev); | 
|  | 4609 | if (ret) | 
|  | 4610 | return ret; | 
|  | 4611 | } | 
| Chris Wilson | 68f95ba | 2010-05-27 13:18:22 +0100 | [diff] [blame] | 4612 |  | 
| Xiang, Haihao | 5c1143b | 2010-09-16 10:43:11 +0800 | [diff] [blame] | 4613 | ret = intel_init_render_ring_buffer(dev); | 
| Chris Wilson | 68f95ba | 2010-05-27 13:18:22 +0100 | [diff] [blame] | 4614 | if (ret) | 
|  | 4615 | goto cleanup_pipe_control; | 
|  | 4616 |  | 
|  | 4617 | if (HAS_BSD(dev)) { | 
| Xiang, Haihao | 5c1143b | 2010-09-16 10:43:11 +0800 | [diff] [blame] | 4618 | ret = intel_init_bsd_ring_buffer(dev); | 
| Chris Wilson | 68f95ba | 2010-05-27 13:18:22 +0100 | [diff] [blame] | 4619 | if (ret) | 
|  | 4620 | goto cleanup_render_ring; | 
| Zou Nan hai | d1b851f | 2010-05-21 09:08:57 +0800 | [diff] [blame] | 4621 | } | 
| Chris Wilson | 68f95ba | 2010-05-27 13:18:22 +0100 | [diff] [blame] | 4622 |  | 
| Chris Wilson | 549f736 | 2010-10-19 11:19:32 +0100 | [diff] [blame] | 4623 | if (HAS_BLT(dev)) { | 
|  | 4624 | ret = intel_init_blt_ring_buffer(dev); | 
|  | 4625 | if (ret) | 
|  | 4626 | goto cleanup_bsd_ring; | 
|  | 4627 | } | 
|  | 4628 |  | 
| Chris Wilson | 6f392d5 | 2010-08-07 11:01:22 +0100 | [diff] [blame] | 4629 | dev_priv->next_seqno = 1; | 
|  | 4630 |  | 
| Chris Wilson | 68f95ba | 2010-05-27 13:18:22 +0100 | [diff] [blame] | 4631 | return 0; | 
|  | 4632 |  | 
| Chris Wilson | 549f736 | 2010-10-19 11:19:32 +0100 | [diff] [blame] | 4633 | cleanup_bsd_ring: | 
| Chris Wilson | 78501ea | 2010-10-27 12:18:21 +0100 | [diff] [blame] | 4634 | intel_cleanup_ring_buffer(&dev_priv->bsd_ring); | 
| Chris Wilson | 68f95ba | 2010-05-27 13:18:22 +0100 | [diff] [blame] | 4635 | cleanup_render_ring: | 
| Chris Wilson | 78501ea | 2010-10-27 12:18:21 +0100 | [diff] [blame] | 4636 | intel_cleanup_ring_buffer(&dev_priv->render_ring); | 
| Chris Wilson | 68f95ba | 2010-05-27 13:18:22 +0100 | [diff] [blame] | 4637 | cleanup_pipe_control: | 
|  | 4638 | if (HAS_PIPE_CONTROL(dev)) | 
|  | 4639 | i915_gem_cleanup_pipe_control(dev); | 
| Zou Nan hai | 8187a2b | 2010-05-21 09:08:55 +0800 | [diff] [blame] | 4640 | return ret; | 
|  | 4641 | } | 
|  | 4642 |  | 
|  | 4643 | void | 
|  | 4644 | i915_gem_cleanup_ringbuffer(struct drm_device *dev) | 
|  | 4645 | { | 
|  | 4646 | drm_i915_private_t *dev_priv = dev->dev_private; | 
|  | 4647 |  | 
| Chris Wilson | 78501ea | 2010-10-27 12:18:21 +0100 | [diff] [blame] | 4648 | intel_cleanup_ring_buffer(&dev_priv->render_ring); | 
|  | 4649 | intel_cleanup_ring_buffer(&dev_priv->bsd_ring); | 
|  | 4650 | intel_cleanup_ring_buffer(&dev_priv->blt_ring); | 
| Zou Nan hai | 8187a2b | 2010-05-21 09:08:55 +0800 | [diff] [blame] | 4651 | if (HAS_PIPE_CONTROL(dev)) | 
|  | 4652 | i915_gem_cleanup_pipe_control(dev); | 
|  | 4653 | } | 
|  | 4654 |  | 
|  | 4655 | int | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4656 | i915_gem_entervt_ioctl(struct drm_device *dev, void *data, | 
|  | 4657 | struct drm_file *file_priv) | 
|  | 4658 | { | 
|  | 4659 | drm_i915_private_t *dev_priv = dev->dev_private; | 
|  | 4660 | int ret; | 
|  | 4661 |  | 
| Jesse Barnes | 79e5394 | 2008-11-07 14:24:08 -0800 | [diff] [blame] | 4662 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 
|  | 4663 | return 0; | 
|  | 4664 |  | 
| Ben Gamari | ba1234d | 2009-09-14 17:48:47 -0400 | [diff] [blame] | 4665 | if (atomic_read(&dev_priv->mm.wedged)) { | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4666 | DRM_ERROR("Reenabling wedged hardware, good luck\n"); | 
| Ben Gamari | ba1234d | 2009-09-14 17:48:47 -0400 | [diff] [blame] | 4667 | atomic_set(&dev_priv->mm.wedged, 0); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4668 | } | 
|  | 4669 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4670 | mutex_lock(&dev->struct_mutex); | 
| Eric Anholt | 9bb2d6f | 2008-12-23 18:42:32 -0800 | [diff] [blame] | 4671 | dev_priv->mm.suspended = 0; | 
|  | 4672 |  | 
|  | 4673 | ret = i915_gem_init_ringbuffer(dev); | 
| Wu Fengguang | d816f6ac | 2009-04-18 10:43:32 +0800 | [diff] [blame] | 4674 | if (ret != 0) { | 
|  | 4675 | mutex_unlock(&dev->struct_mutex); | 
| Eric Anholt | 9bb2d6f | 2008-12-23 18:42:32 -0800 | [diff] [blame] | 4676 | return ret; | 
| Wu Fengguang | d816f6ac | 2009-04-18 10:43:32 +0800 | [diff] [blame] | 4677 | } | 
| Eric Anholt | 9bb2d6f | 2008-12-23 18:42:32 -0800 | [diff] [blame] | 4678 |  | 
| Chris Wilson | 69dc498 | 2010-10-19 10:36:51 +0100 | [diff] [blame] | 4679 | BUG_ON(!list_empty(&dev_priv->mm.active_list)); | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 4680 | BUG_ON(!list_empty(&dev_priv->render_ring.active_list)); | 
| Chris Wilson | 87acb0a | 2010-10-19 10:13:00 +0100 | [diff] [blame] | 4681 | BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list)); | 
| Chris Wilson | 549f736 | 2010-10-19 11:19:32 +0100 | [diff] [blame] | 4682 | BUG_ON(!list_empty(&dev_priv->blt_ring.active_list)); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4683 | BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); | 
|  | 4684 | BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 4685 | BUG_ON(!list_empty(&dev_priv->render_ring.request_list)); | 
| Chris Wilson | 87acb0a | 2010-10-19 10:13:00 +0100 | [diff] [blame] | 4686 | BUG_ON(!list_empty(&dev_priv->bsd_ring.request_list)); | 
| Chris Wilson | 549f736 | 2010-10-19 11:19:32 +0100 | [diff] [blame] | 4687 | BUG_ON(!list_empty(&dev_priv->blt_ring.request_list)); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4688 | mutex_unlock(&dev->struct_mutex); | 
| Kristian Høgsberg | dbb19d3 | 2008-08-20 11:04:27 -0400 | [diff] [blame] | 4689 |  | 
| Chris Wilson | 5f35308 | 2010-06-07 14:03:03 +0100 | [diff] [blame] | 4690 | ret = drm_irq_install(dev); | 
|  | 4691 | if (ret) | 
|  | 4692 | goto cleanup_ringbuffer; | 
| Kristian Høgsberg | dbb19d3 | 2008-08-20 11:04:27 -0400 | [diff] [blame] | 4693 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4694 | return 0; | 
| Chris Wilson | 5f35308 | 2010-06-07 14:03:03 +0100 | [diff] [blame] | 4695 |  | 
|  | 4696 | cleanup_ringbuffer: | 
|  | 4697 | mutex_lock(&dev->struct_mutex); | 
|  | 4698 | i915_gem_cleanup_ringbuffer(dev); | 
|  | 4699 | dev_priv->mm.suspended = 1; | 
|  | 4700 | mutex_unlock(&dev->struct_mutex); | 
|  | 4701 |  | 
|  | 4702 | return ret; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4703 | } | 
|  | 4704 |  | 
|  | 4705 | int | 
|  | 4706 | i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, | 
|  | 4707 | struct drm_file *file_priv) | 
|  | 4708 | { | 
| Jesse Barnes | 79e5394 | 2008-11-07 14:24:08 -0800 | [diff] [blame] | 4709 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 
|  | 4710 | return 0; | 
|  | 4711 |  | 
| Kristian Høgsberg | dbb19d3 | 2008-08-20 11:04:27 -0400 | [diff] [blame] | 4712 | drm_irq_uninstall(dev); | 
| Linus Torvalds | e6890f6 | 2009-09-08 17:09:24 -0700 | [diff] [blame] | 4713 | return i915_gem_idle(dev); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4714 | } | 
|  | 4715 |  | 
|  | 4716 | void | 
|  | 4717 | i915_gem_lastclose(struct drm_device *dev) | 
|  | 4718 | { | 
|  | 4719 | int ret; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4720 |  | 
| Eric Anholt | e806b49 | 2009-01-22 09:56:58 -0800 | [diff] [blame] | 4721 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 
|  | 4722 | return; | 
|  | 4723 |  | 
| Keith Packard | 6dbe277 | 2008-10-14 21:41:13 -0700 | [diff] [blame] | 4724 | ret = i915_gem_idle(dev); | 
|  | 4725 | if (ret) | 
|  | 4726 | DRM_ERROR("failed to idle hardware: %d\n", ret); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4727 | } | 
|  | 4728 |  | 
| Chris Wilson | 6419340 | 2010-10-24 12:38:05 +0100 | [diff] [blame] | 4729 | static void | 
|  | 4730 | init_ring_lists(struct intel_ring_buffer *ring) | 
|  | 4731 | { | 
|  | 4732 | INIT_LIST_HEAD(&ring->active_list); | 
|  | 4733 | INIT_LIST_HEAD(&ring->request_list); | 
|  | 4734 | INIT_LIST_HEAD(&ring->gpu_write_list); | 
|  | 4735 | } | 
|  | 4736 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4737 | void | 
|  | 4738 | i915_gem_load(struct drm_device *dev) | 
|  | 4739 | { | 
| Grégoire Henry | b5aa8a0 | 2009-06-23 15:41:02 +0200 | [diff] [blame] | 4740 | int i; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4741 | drm_i915_private_t *dev_priv = dev->dev_private; | 
|  | 4742 |  | 
| Chris Wilson | 69dc498 | 2010-10-19 10:36:51 +0100 | [diff] [blame] | 4743 | INIT_LIST_HEAD(&dev_priv->mm.active_list); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4744 | INIT_LIST_HEAD(&dev_priv->mm.flushing_list); | 
|  | 4745 | INIT_LIST_HEAD(&dev_priv->mm.inactive_list); | 
| Chris Wilson | f13d3f7 | 2010-09-20 17:36:15 +0100 | [diff] [blame] | 4746 | INIT_LIST_HEAD(&dev_priv->mm.pinned_list); | 
| Eric Anholt | a09ba7f | 2009-08-29 12:49:51 -0700 | [diff] [blame] | 4747 | INIT_LIST_HEAD(&dev_priv->mm.fence_list); | 
| Chris Wilson | be72615 | 2010-07-23 23:18:50 +0100 | [diff] [blame] | 4748 | INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list); | 
| Chris Wilson | 6419340 | 2010-10-24 12:38:05 +0100 | [diff] [blame] | 4749 | init_ring_lists(&dev_priv->render_ring); | 
|  | 4750 | init_ring_lists(&dev_priv->bsd_ring); | 
|  | 4751 | init_ring_lists(&dev_priv->blt_ring); | 
| Daniel Vetter | 007cc8a | 2010-04-28 11:02:31 +0200 | [diff] [blame] | 4752 | for (i = 0; i < 16; i++) | 
|  | 4753 | INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4754 | INIT_DELAYED_WORK(&dev_priv->mm.retire_work, | 
|  | 4755 | i915_gem_retire_work_handler); | 
| Chris Wilson | 30dbf0c | 2010-09-25 10:19:17 +0100 | [diff] [blame] | 4756 | init_completion(&dev_priv->error_completion); | 
| Chris Wilson | 3116971 | 2009-09-14 16:50:28 +0100 | [diff] [blame] | 4757 |  | 
| Dave Airlie | 9440012 | 2010-07-20 13:15:31 +1000 | [diff] [blame] | 4758 | /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ | 
|  | 4759 | if (IS_GEN3(dev)) { | 
|  | 4760 | u32 tmp = I915_READ(MI_ARB_STATE); | 
|  | 4761 | if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) { | 
|  | 4762 | /* arb state is a masked write, so set bit + bit in mask */ | 
|  | 4763 | tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT); | 
|  | 4764 | I915_WRITE(MI_ARB_STATE, tmp); | 
|  | 4765 | } | 
|  | 4766 | } | 
|  | 4767 |  | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 4768 | /* Old X drivers will take 0-2 for front, back, depth buffers */ | 
| Eric Anholt | b397c83 | 2010-01-26 09:43:10 -0800 | [diff] [blame] | 4769 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | 
|  | 4770 | dev_priv->fence_reg_start = 3; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 4771 |  | 
| Chris Wilson | a6c45cf | 2010-09-17 00:32:17 +0100 | [diff] [blame] | 4772 | if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 4773 | dev_priv->num_fence_regs = 16; | 
|  | 4774 | else | 
|  | 4775 | dev_priv->num_fence_regs = 8; | 
|  | 4776 |  | 
| Grégoire Henry | b5aa8a0 | 2009-06-23 15:41:02 +0200 | [diff] [blame] | 4777 | /* Initialize fence registers to zero */ | 
| Chris Wilson | a6c45cf | 2010-09-17 00:32:17 +0100 | [diff] [blame] | 4778 | switch (INTEL_INFO(dev)->gen) { | 
|  | 4779 | case 6: | 
|  | 4780 | for (i = 0; i < 16; i++) | 
|  | 4781 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0); | 
|  | 4782 | break; | 
|  | 4783 | case 5: | 
|  | 4784 | case 4: | 
| Grégoire Henry | b5aa8a0 | 2009-06-23 15:41:02 +0200 | [diff] [blame] | 4785 | for (i = 0; i < 16; i++) | 
|  | 4786 | I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0); | 
| Chris Wilson | a6c45cf | 2010-09-17 00:32:17 +0100 | [diff] [blame] | 4787 | break; | 
|  | 4788 | case 3: | 
| Grégoire Henry | b5aa8a0 | 2009-06-23 15:41:02 +0200 | [diff] [blame] | 4789 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | 
|  | 4790 | for (i = 0; i < 8; i++) | 
|  | 4791 | I915_WRITE(FENCE_REG_945_8 + (i * 4), 0); | 
| Chris Wilson | a6c45cf | 2010-09-17 00:32:17 +0100 | [diff] [blame] | 4792 | case 2: | 
|  | 4793 | for (i = 0; i < 8; i++) | 
|  | 4794 | I915_WRITE(FENCE_REG_830_0 + (i * 4), 0); | 
|  | 4795 | break; | 
| Grégoire Henry | b5aa8a0 | 2009-06-23 15:41:02 +0200 | [diff] [blame] | 4796 | } | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4797 | i915_gem_detect_bit_6_swizzle(dev); | 
| Kristian Høgsberg | 6b95a20 | 2009-11-18 11:25:18 -0500 | [diff] [blame] | 4798 | init_waitqueue_head(&dev_priv->pending_flip_queue); | 
| Chris Wilson | 17250b7 | 2010-10-28 12:51:39 +0100 | [diff] [blame] | 4799 |  | 
|  | 4800 | dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink; | 
|  | 4801 | dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS; | 
|  | 4802 | register_shrinker(&dev_priv->mm.inactive_shrinker); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4803 | } | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4804 |  | 
|  | 4805 | /* | 
|  | 4806 | * Create a physically contiguous memory object for this object | 
|  | 4807 | * e.g. for cursor + overlay regs | 
|  | 4808 | */ | 
| Chris Wilson | 995b6762 | 2010-08-20 13:23:26 +0100 | [diff] [blame] | 4809 | static int i915_gem_init_phys_object(struct drm_device *dev, | 
|  | 4810 | int id, int size, int align) | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4811 | { | 
|  | 4812 | drm_i915_private_t *dev_priv = dev->dev_private; | 
|  | 4813 | struct drm_i915_gem_phys_object *phys_obj; | 
|  | 4814 | int ret; | 
|  | 4815 |  | 
|  | 4816 | if (dev_priv->mm.phys_objs[id - 1] || !size) | 
|  | 4817 | return 0; | 
|  | 4818 |  | 
| Eric Anholt | 9a298b2 | 2009-03-24 12:23:04 -0700 | [diff] [blame] | 4819 | phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL); | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4820 | if (!phys_obj) | 
|  | 4821 | return -ENOMEM; | 
|  | 4822 |  | 
|  | 4823 | phys_obj->id = id; | 
|  | 4824 |  | 
| Chris Wilson | 6eeefaf | 2010-08-07 11:01:39 +0100 | [diff] [blame] | 4825 | phys_obj->handle = drm_pci_alloc(dev, size, align); | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4826 | if (!phys_obj->handle) { | 
|  | 4827 | ret = -ENOMEM; | 
|  | 4828 | goto kfree_obj; | 
|  | 4829 | } | 
|  | 4830 | #ifdef CONFIG_X86 | 
|  | 4831 | set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE); | 
|  | 4832 | #endif | 
|  | 4833 |  | 
|  | 4834 | dev_priv->mm.phys_objs[id - 1] = phys_obj; | 
|  | 4835 |  | 
|  | 4836 | return 0; | 
|  | 4837 | kfree_obj: | 
| Eric Anholt | 9a298b2 | 2009-03-24 12:23:04 -0700 | [diff] [blame] | 4838 | kfree(phys_obj); | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4839 | return ret; | 
|  | 4840 | } | 
|  | 4841 |  | 
| Chris Wilson | 995b6762 | 2010-08-20 13:23:26 +0100 | [diff] [blame] | 4842 | static void i915_gem_free_phys_object(struct drm_device *dev, int id) | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4843 | { | 
|  | 4844 | drm_i915_private_t *dev_priv = dev->dev_private; | 
|  | 4845 | struct drm_i915_gem_phys_object *phys_obj; | 
|  | 4846 |  | 
|  | 4847 | if (!dev_priv->mm.phys_objs[id - 1]) | 
|  | 4848 | return; | 
|  | 4849 |  | 
|  | 4850 | phys_obj = dev_priv->mm.phys_objs[id - 1]; | 
|  | 4851 | if (phys_obj->cur_obj) { | 
|  | 4852 | i915_gem_detach_phys_object(dev, phys_obj->cur_obj); | 
|  | 4853 | } | 
|  | 4854 |  | 
|  | 4855 | #ifdef CONFIG_X86 | 
|  | 4856 | set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE); | 
|  | 4857 | #endif | 
|  | 4858 | drm_pci_free(dev, phys_obj->handle); | 
|  | 4859 | kfree(phys_obj); | 
|  | 4860 | dev_priv->mm.phys_objs[id - 1] = NULL; | 
|  | 4861 | } | 
|  | 4862 |  | 
|  | 4863 | void i915_gem_free_all_phys_object(struct drm_device *dev) | 
|  | 4864 | { | 
|  | 4865 | int i; | 
|  | 4866 |  | 
| Dave Airlie | 260883c | 2009-01-22 17:58:49 +1000 | [diff] [blame] | 4867 | for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++) | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4868 | i915_gem_free_phys_object(dev, i); | 
|  | 4869 | } | 
|  | 4870 |  | 
|  | 4871 | void i915_gem_detach_phys_object(struct drm_device *dev, | 
|  | 4872 | struct drm_gem_object *obj) | 
|  | 4873 | { | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 4874 | struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; | 
|  | 4875 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
|  | 4876 | char *vaddr; | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4877 | int i; | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4878 | int page_count; | 
|  | 4879 |  | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4880 | if (!obj_priv->phys_obj) | 
|  | 4881 | return; | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 4882 | vaddr = obj_priv->phys_obj->handle->vaddr; | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4883 |  | 
|  | 4884 | page_count = obj->size / PAGE_SIZE; | 
|  | 4885 |  | 
|  | 4886 | for (i = 0; i < page_count; i++) { | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 4887 | struct page *page = read_cache_page_gfp(mapping, i, | 
|  | 4888 | GFP_HIGHUSER | __GFP_RECLAIMABLE); | 
|  | 4889 | if (!IS_ERR(page)) { | 
|  | 4890 | char *dst = kmap_atomic(page); | 
|  | 4891 | memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE); | 
|  | 4892 | kunmap_atomic(dst); | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4893 |  | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 4894 | drm_clflush_pages(&page, 1); | 
|  | 4895 |  | 
|  | 4896 | set_page_dirty(page); | 
|  | 4897 | mark_page_accessed(page); | 
|  | 4898 | page_cache_release(page); | 
|  | 4899 | } | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4900 | } | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4901 | drm_agp_chipset_flush(dev); | 
| Chris Wilson | d78b47b | 2009-06-17 21:52:49 +0100 | [diff] [blame] | 4902 |  | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4903 | obj_priv->phys_obj->cur_obj = NULL; | 
|  | 4904 | obj_priv->phys_obj = NULL; | 
|  | 4905 | } | 
|  | 4906 |  | 
|  | 4907 | int | 
|  | 4908 | i915_gem_attach_phys_object(struct drm_device *dev, | 
| Chris Wilson | 6eeefaf | 2010-08-07 11:01:39 +0100 | [diff] [blame] | 4909 | struct drm_gem_object *obj, | 
|  | 4910 | int id, | 
|  | 4911 | int align) | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4912 | { | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 4913 | struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4914 | drm_i915_private_t *dev_priv = dev->dev_private; | 
|  | 4915 | struct drm_i915_gem_object *obj_priv; | 
|  | 4916 | int ret = 0; | 
|  | 4917 | int page_count; | 
|  | 4918 | int i; | 
|  | 4919 |  | 
|  | 4920 | if (id > I915_MAX_PHYS_OBJECT) | 
|  | 4921 | return -EINVAL; | 
|  | 4922 |  | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 4923 | obj_priv = to_intel_bo(obj); | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4924 |  | 
|  | 4925 | if (obj_priv->phys_obj) { | 
|  | 4926 | if (obj_priv->phys_obj->id == id) | 
|  | 4927 | return 0; | 
|  | 4928 | i915_gem_detach_phys_object(dev, obj); | 
|  | 4929 | } | 
|  | 4930 |  | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4931 | /* create a new object */ | 
|  | 4932 | if (!dev_priv->mm.phys_objs[id - 1]) { | 
|  | 4933 | ret = i915_gem_init_phys_object(dev, id, | 
| Chris Wilson | 6eeefaf | 2010-08-07 11:01:39 +0100 | [diff] [blame] | 4934 | obj->size, align); | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4935 | if (ret) { | 
| Linus Torvalds | aeb565d | 2009-01-26 10:01:53 -0800 | [diff] [blame] | 4936 | DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size); | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 4937 | return ret; | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4938 | } | 
|  | 4939 | } | 
|  | 4940 |  | 
|  | 4941 | /* bind to the object */ | 
|  | 4942 | obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; | 
|  | 4943 | obj_priv->phys_obj->cur_obj = obj; | 
|  | 4944 |  | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4945 | page_count = obj->size / PAGE_SIZE; | 
|  | 4946 |  | 
|  | 4947 | for (i = 0; i < page_count; i++) { | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 4948 | struct page *page; | 
|  | 4949 | char *dst, *src; | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4950 |  | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 4951 | page = read_cache_page_gfp(mapping, i, | 
|  | 4952 | GFP_HIGHUSER | __GFP_RECLAIMABLE); | 
|  | 4953 | if (IS_ERR(page)) | 
|  | 4954 | return PTR_ERR(page); | 
|  | 4955 |  | 
| Chris Wilson | ff75b9b | 2010-10-30 22:52:31 +0100 | [diff] [blame] | 4956 | src = kmap_atomic(page); | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 4957 | dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4958 | memcpy(dst, src, PAGE_SIZE); | 
| Peter Zijlstra | 3e4d3af | 2010-10-26 14:21:51 -0700 | [diff] [blame] | 4959 | kunmap_atomic(src); | 
| Chris Wilson | e5281cc | 2010-10-28 13:45:36 +0100 | [diff] [blame] | 4960 |  | 
|  | 4961 | mark_page_accessed(page); | 
|  | 4962 | page_cache_release(page); | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4963 | } | 
|  | 4964 |  | 
|  | 4965 | return 0; | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4966 | } | 
|  | 4967 |  | 
|  | 4968 | static int | 
|  | 4969 | i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, | 
|  | 4970 | struct drm_i915_gem_pwrite *args, | 
|  | 4971 | struct drm_file *file_priv) | 
|  | 4972 | { | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 4973 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4974 | void *obj_addr; | 
|  | 4975 | int ret; | 
|  | 4976 | char __user *user_data; | 
|  | 4977 |  | 
|  | 4978 | user_data = (char __user *) (uintptr_t) args->data_ptr; | 
|  | 4979 | obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset; | 
|  | 4980 |  | 
| Zhao Yakui | 44d98a6 | 2009-10-09 11:39:40 +0800 | [diff] [blame] | 4981 | DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size); | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4982 | ret = copy_from_user(obj_addr, user_data, args->size); | 
|  | 4983 | if (ret) | 
|  | 4984 | return -EFAULT; | 
|  | 4985 |  | 
|  | 4986 | drm_agp_chipset_flush(dev); | 
|  | 4987 | return 0; | 
|  | 4988 | } | 
| Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 4989 |  | 
| Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 4990 | void i915_gem_release(struct drm_device *dev, struct drm_file *file) | 
| Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 4991 | { | 
| Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 4992 | struct drm_i915_file_private *file_priv = file->driver_priv; | 
| Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 4993 |  | 
|  | 4994 | /* Clean up our request list when the client is going away, so that | 
|  | 4995 | * later retire_requests won't dereference our soon-to-be-gone | 
|  | 4996 | * file_priv. | 
|  | 4997 | */ | 
| Chris Wilson | 1c25595 | 2010-09-26 11:03:27 +0100 | [diff] [blame] | 4998 | spin_lock(&file_priv->mm.lock); | 
| Chris Wilson | f787a5f | 2010-09-24 16:02:42 +0100 | [diff] [blame] | 4999 | while (!list_empty(&file_priv->mm.request_list)) { | 
|  | 5000 | struct drm_i915_gem_request *request; | 
|  | 5001 |  | 
|  | 5002 | request = list_first_entry(&file_priv->mm.request_list, | 
|  | 5003 | struct drm_i915_gem_request, | 
|  | 5004 | client_list); | 
|  | 5005 | list_del(&request->client_list); | 
|  | 5006 | request->file_priv = NULL; | 
|  | 5007 | } | 
| Chris Wilson | 1c25595 | 2010-09-26 11:03:27 +0100 | [diff] [blame] | 5008 | spin_unlock(&file_priv->mm.lock); | 
| Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 5009 | } | 
| Chris Wilson | 3116971 | 2009-09-14 16:50:28 +0100 | [diff] [blame] | 5010 |  | 
| Chris Wilson | 3116971 | 2009-09-14 16:50:28 +0100 | [diff] [blame] | 5011 | static int | 
| Chris Wilson | 1637ef4 | 2010-04-20 17:10:35 +0100 | [diff] [blame] | 5012 | i915_gpu_is_active(struct drm_device *dev) | 
|  | 5013 | { | 
|  | 5014 | drm_i915_private_t *dev_priv = dev->dev_private; | 
|  | 5015 | int lists_empty; | 
|  | 5016 |  | 
| Chris Wilson | 1637ef4 | 2010-04-20 17:10:35 +0100 | [diff] [blame] | 5017 | lists_empty = list_empty(&dev_priv->mm.flushing_list) && | 
| Chris Wilson | 17250b7 | 2010-10-28 12:51:39 +0100 | [diff] [blame] | 5018 | list_empty(&dev_priv->mm.active_list); | 
| Chris Wilson | 1637ef4 | 2010-04-20 17:10:35 +0100 | [diff] [blame] | 5019 |  | 
|  | 5020 | return !lists_empty; | 
|  | 5021 | } | 
|  | 5022 |  | 
|  | 5023 | static int | 
| Chris Wilson | 17250b7 | 2010-10-28 12:51:39 +0100 | [diff] [blame] | 5024 | i915_gem_inactive_shrink(struct shrinker *shrinker, | 
|  | 5025 | int nr_to_scan, | 
|  | 5026 | gfp_t gfp_mask) | 
| Chris Wilson | 3116971 | 2009-09-14 16:50:28 +0100 | [diff] [blame] | 5027 | { | 
| Chris Wilson | 17250b7 | 2010-10-28 12:51:39 +0100 | [diff] [blame] | 5028 | struct drm_i915_private *dev_priv = | 
|  | 5029 | container_of(shrinker, | 
|  | 5030 | struct drm_i915_private, | 
|  | 5031 | mm.inactive_shrinker); | 
|  | 5032 | struct drm_device *dev = dev_priv->dev; | 
|  | 5033 | struct drm_i915_gem_object *obj, *next; | 
|  | 5034 | int cnt; | 
|  | 5035 |  | 
|  | 5036 | if (!mutex_trylock(&dev->struct_mutex)) | 
| Chris Wilson | bbe2e11 | 2010-10-28 22:35:07 +0100 | [diff] [blame] | 5037 | return 0; | 
| Chris Wilson | 3116971 | 2009-09-14 16:50:28 +0100 | [diff] [blame] | 5038 |  | 
|  | 5039 | /* "fast-path" to count number of available objects */ | 
|  | 5040 | if (nr_to_scan == 0) { | 
| Chris Wilson | 17250b7 | 2010-10-28 12:51:39 +0100 | [diff] [blame] | 5041 | cnt = 0; | 
|  | 5042 | list_for_each_entry(obj, | 
|  | 5043 | &dev_priv->mm.inactive_list, | 
|  | 5044 | mm_list) | 
|  | 5045 | cnt++; | 
|  | 5046 | mutex_unlock(&dev->struct_mutex); | 
|  | 5047 | return cnt / 100 * sysctl_vfs_cache_pressure; | 
| Chris Wilson | 3116971 | 2009-09-14 16:50:28 +0100 | [diff] [blame] | 5048 | } | 
|  | 5049 |  | 
| Chris Wilson | 1637ef4 | 2010-04-20 17:10:35 +0100 | [diff] [blame] | 5050 | rescan: | 
| Chris Wilson | 3116971 | 2009-09-14 16:50:28 +0100 | [diff] [blame] | 5051 | /* first scan for clean buffers */ | 
| Chris Wilson | 17250b7 | 2010-10-28 12:51:39 +0100 | [diff] [blame] | 5052 | i915_gem_retire_requests(dev); | 
| Chris Wilson | 3116971 | 2009-09-14 16:50:28 +0100 | [diff] [blame] | 5053 |  | 
| Chris Wilson | 17250b7 | 2010-10-28 12:51:39 +0100 | [diff] [blame] | 5054 | list_for_each_entry_safe(obj, next, | 
|  | 5055 | &dev_priv->mm.inactive_list, | 
|  | 5056 | mm_list) { | 
|  | 5057 | if (i915_gem_object_is_purgeable(obj)) { | 
|  | 5058 | i915_gem_object_unbind(&obj->base); | 
|  | 5059 | if (--nr_to_scan == 0) | 
|  | 5060 | break; | 
| Chris Wilson | 3116971 | 2009-09-14 16:50:28 +0100 | [diff] [blame] | 5061 | } | 
| Chris Wilson | 3116971 | 2009-09-14 16:50:28 +0100 | [diff] [blame] | 5062 | } | 
|  | 5063 |  | 
|  | 5064 | /* second pass, evict/count anything still on the inactive list */ | 
| Chris Wilson | 17250b7 | 2010-10-28 12:51:39 +0100 | [diff] [blame] | 5065 | cnt = 0; | 
|  | 5066 | list_for_each_entry_safe(obj, next, | 
|  | 5067 | &dev_priv->mm.inactive_list, | 
|  | 5068 | mm_list) { | 
|  | 5069 | if (nr_to_scan) { | 
|  | 5070 | i915_gem_object_unbind(&obj->base); | 
|  | 5071 | nr_to_scan--; | 
|  | 5072 | } else | 
|  | 5073 | cnt++; | 
| Chris Wilson | 3116971 | 2009-09-14 16:50:28 +0100 | [diff] [blame] | 5074 | } | 
|  | 5075 |  | 
| Chris Wilson | 17250b7 | 2010-10-28 12:51:39 +0100 | [diff] [blame] | 5076 | if (nr_to_scan && i915_gpu_is_active(dev)) { | 
| Chris Wilson | 1637ef4 | 2010-04-20 17:10:35 +0100 | [diff] [blame] | 5077 | /* | 
|  | 5078 | * We are desperate for pages, so as a last resort, wait | 
|  | 5079 | * for the GPU to finish and discard whatever we can. | 
|  | 5080 | * This has a dramatic impact to reduce the number of | 
|  | 5081 | * OOM-killer events whilst running the GPU aggressively. | 
|  | 5082 | */ | 
| Chris Wilson | 17250b7 | 2010-10-28 12:51:39 +0100 | [diff] [blame] | 5083 | if (i915_gpu_idle(dev) == 0) | 
| Chris Wilson | 1637ef4 | 2010-04-20 17:10:35 +0100 | [diff] [blame] | 5084 | goto rescan; | 
|  | 5085 | } | 
| Chris Wilson | 17250b7 | 2010-10-28 12:51:39 +0100 | [diff] [blame] | 5086 | mutex_unlock(&dev->struct_mutex); | 
|  | 5087 | return cnt / 100 * sysctl_vfs_cache_pressure; | 
| Chris Wilson | 3116971 | 2009-09-14 16:50:28 +0100 | [diff] [blame] | 5088 | } |