/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
					     int write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
						      uint64_t offset,
						      uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
				       unsigned alignment);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
static int i915_gem_evict_something(struct drm_device *dev, int min_size);
static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file_priv);
static void i915_gem_free_object_tail(struct drm_gem_object *obj);

static LIST_HEAD(shrink_list);
static DEFINE_SPINLOCK(shrink_list_lock);
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
		     unsigned long end)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (start >= end ||
	    (start & (PAGE_SIZE - 1)) != 0 ||
	    (end & (PAGE_SIZE - 1)) != 0) {
		return -EINVAL;
	}

	drm_mm_init(&dev_priv->mm.gtt_space, start,
		    end - start);

	dev->gtt_total = (uint32_t) (end - start);

	return 0;
}
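
/*
 * Illustrative sketch (editorial, not part of the driver): how userspace
 * would reach i915_gem_do_init() via libdrm.  The bounds below are
 * hypothetical; the only real constraints, enforced above, are start < end
 * and page alignment of both values, otherwise the ioctl returns -EINVAL.
 *
 *	struct drm_i915_gem_init init = {
 *		.gtt_start = 16 * 1024 * 1024,
 *		.gtt_end   = 256 * 1024 * 1024,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_INIT, &init);
 */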
int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_init *args = data;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_i915_gem_get_aperture *args = data;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	args->aper_size = dev->gtt_total;
	args->aper_available_size = (args->aper_size -
				     atomic_read(&dev->pin_memory));

	return 0;
}
/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_create *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	args->size = roundup(args->size, PAGE_SIZE);

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;

	return 0;
}
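
/*
 * Illustrative sketch (editorial, not part of the driver): creating an
 * object from userspace.  The requested size is rounded up to a page
 * multiple above, so asking for one byte still yields a 4096-byte object
 * on 4K-page systems; fd is an open DRM device node, and on success
 * create.handle names the new object.
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
 */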
static inline int
fast_shmem_read(struct page **pages,
		loff_t page_base, int page_offset,
		char __user *data,
		int length)
{
	char __iomem *vaddr;
	int unwritten;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
	if (vaddr == NULL)
		return -ENOMEM;
	unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
	kunmap_atomic(vaddr, KM_USER0);

	if (unwritten)
		return -EFAULT;

	return 0;
}
static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj_priv->tiling_mode != I915_TILING_NONE;
}
static inline void
slow_shmem_copy(struct page *dst_page,
		int dst_offset,
		struct page *src_page,
		int src_offset,
		int length)
{
	char *dst_vaddr, *src_vaddr;

	dst_vaddr = kmap(dst_page);
	src_vaddr = kmap(src_page);

	memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);

	kunmap(src_page);
	kunmap(dst_page);
}
static inline void
slow_shmem_bit17_copy(struct page *gpu_page,
		      int gpu_offset,
		      struct page *cpu_page,
		      int cpu_offset,
		      int length,
		      int is_read)
{
	char *gpu_vaddr, *cpu_vaddr;

	/* Use the unswizzled path if this page isn't affected. */
	if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
		if (is_read)
			return slow_shmem_copy(cpu_page, cpu_offset,
					       gpu_page, gpu_offset, length);
		else
			return slow_shmem_copy(gpu_page, gpu_offset,
					       cpu_page, cpu_offset, length);
	}

	gpu_vaddr = kmap(gpu_page);
	cpu_vaddr = kmap(cpu_page);

	/* Copy the data, XORing A6 with A17 (1). The user already knows he's
	 * XORing with the other bits (A9 for Y, A9 and A10 for X)
	 */
	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		if (is_read) {
			memcpy(cpu_vaddr + cpu_offset,
			       gpu_vaddr + swizzled_gpu_offset,
			       this_length);
		} else {
			memcpy(gpu_vaddr + swizzled_gpu_offset,
			       cpu_vaddr + cpu_offset,
			       this_length);
		}
		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	kunmap(cpu_page);
	kunmap(gpu_page);
}
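
/*
 * Worked example (editorial note): for a page whose physical address has
 * bit 17 set, the hardware folds that bit into bit 6 of the byte offset,
 * so gpu_offset ^ 64 flips which 64-byte cacheline is touched.  A read of
 * offset 0x000 therefore copies from 0x040, offset 0x040 copies from
 * 0x000, 0x080 from 0x0c0, and so on, pairwise within each 128-byte span.
 * Bounding this_length by the cacheline end guarantees a single memcpy
 * never straddles two differently-swizzled cachelines.
 */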

/**
 * This is the fast shmem pread path, which attempts to copy_to_user directly
 * from the backing pages of the object to the user's address space.  On a
 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
 */
static int
i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj, 0);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = to_intel_bo(obj);
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_shmem_read(obj_priv->pages,
				      page_base, page_offset,
				      user_data, page_length);
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

static int
i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
{
	int ret;

	ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);

	/* If we have insufficient memory to map in the pages, attempt
	 * to make some space by throwing out some old buffers.
	 */
	if (ret == -ENOMEM) {
		struct drm_device *dev = obj->dev;

		ret = i915_gem_evict_something(dev, obj->size);
		if (ret)
			return ret;

		ret = i915_gem_object_get_pages(obj, 0);
	}

	return ret;
}

/**
 * This is the fallback shmem pread path, which uses get_user_pages to pin
 * the user pages ahead of time, so we can copy out of the object's backing
 * pages while holding the struct mutex without taking page faults.
 */
static int
i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, yet we want to hold it while
	 * dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 1, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto fail_put_user_pages;
	}

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages_or_evict(obj);
	if (ret)
		goto fail_unlock;

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = to_intel_bo(obj);
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		if (do_bit17_swizzling) {
			slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length,
					      1);
		} else {
			slow_shmem_copy(user_pages[data_page_index],
					data_page_offset,
					obj_priv->pages[shmem_page_index],
					shmem_page_offset,
					page_length);
		}

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
	for (i = 0; i < pinned_pages; i++) {
		SetPageDirty(user_pages[i]);
		page_cache_release(user_pages[i]);
	}
	drm_free_large(user_pages);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = to_intel_bo(obj);

	/* Bounds check source.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference_unlocked(obj);
		return -EINVAL;
	}

	if (i915_gem_object_needs_bit17_swizzle(obj)) {
		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
	} else {
		ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
		if (ret != 0)
			ret = i915_gem_shmem_pread_slow(dev, obj, args,
							file_priv);
	}

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
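
/*
 * Illustrative sketch (editorial, not part of the driver): reading an
 * object back from userspace.  buf and handle are hypothetical; data_ptr
 * must be a valid user address or the copy faults and the ioctl returns
 * -EFAULT.
 *
 *	char buf[4096];
 *	struct drm_i915_gem_pread pread = {
 *		.handle   = handle,
 *		.offset   = 0,
 *		.size     = sizeof(buf),
 *		.data_ptr = (uint64_t)(uintptr_t)buf,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
 */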

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base, KM_USER0);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic, KM_USER0);
	if (unwritten)
		return -EFAULT;
	return 0;
}

/* Here's the write path which can sleep for
 * page faults
 */

static inline void
slow_kernel_write(struct io_mapping *mapping,
		  loff_t gtt_base, int gtt_offset,
		  struct page *user_page, int user_offset,
		  int length)
{
	char __iomem *dst_vaddr;
	char *src_vaddr;

	dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
	src_vaddr = kmap(user_page);

	memcpy_toio(dst_vaddr + gtt_offset,
		    src_vaddr + user_offset,
		    length);

	kunmap(user_page);
	io_mapping_unmap(dst_vaddr);
}
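
/*
 * Editorial note on the pairing above: fast_user_write() uses an atomic
 * write-combining mapping, so it must not sleep; a fault during
 * __copy_from_user_inatomic_nocache() simply reports unwritten bytes and
 * the caller retries on the slow path.  slow_kernel_write() is only ever
 * called with the source page already pinned by get_user_pages(), so it
 * is free to use the sleeping io_mapping_map_wc()/kmap() pair and cannot
 * fault.
 */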
static inline int
fast_shmem_write(struct page **pages,
		 loff_t page_base, int page_offset,
		 char __user *data,
		 int length)
{
	char __iomem *vaddr;
	unsigned long unwritten;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
	if (vaddr == NULL)
		return -ENOMEM;
	unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
	kunmap_atomic(vaddr, KM_USER0);

	if (unwritten)
		return -EFAULT;
	return 0;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;
	if (!access_ok(VERIFY_READ, user_data, remain))
		return -EFAULT;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto fail;

	obj_priv = to_intel_bo(obj);
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_user_write(dev_priv->mm.gtt_mapping, page_base,
				      page_offset, user_data, page_length);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 */
		if (ret)
			goto fail;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail:
	i915_gem_object_unpin(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap for copying.
 *
 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
 */
static int
i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t gtt_page_base, offset;
	loff_t first_data_page, last_data_page, num_pages;
	loff_t pinned_pages, i;
	struct page **user_pages;
	struct mm_struct *mm = current->mm;
	int gtt_page_offset, data_page_offset, data_page_index, page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out_unpin_pages;
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret)
		goto out_unlock;

	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto out_unpin_object;

	obj_priv = to_intel_bo(obj);
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * gtt_page_base = page offset within aperture
		 * gtt_page_offset = offset within page in aperture
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		gtt_page_base = offset & PAGE_MASK;
		gtt_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((gtt_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - gtt_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		slow_kernel_write(dev_priv->mm.gtt_mapping,
				  gtt_page_base, gtt_page_offset,
				  user_pages[data_page_index],
				  data_page_offset,
				  page_length);

		remain -= page_length;
		offset += page_length;
		data_ptr += page_length;
	}

out_unpin_object:
	i915_gem_object_unpin(obj);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
out_unpin_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

/**
 * This is the fast shmem pwrite path, which attempts to directly
 * copy_from_user into the kmapped pages backing the object.
 */
static int
i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj, 0);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = to_intel_bo(obj);
	offset = args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_shmem_write(obj_priv->pages,
				       page_base, page_offset,
				       user_data, page_length);
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap for copying.
 *
 * This avoids taking mmap_sem for faulting on the user's address while the
 * struct_mutex is held.
 */
static int
i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto fail_put_user_pages;
	}

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages_or_evict(obj);
	if (ret)
		goto fail_unlock;

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = to_intel_bo(obj);
	offset = args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		if (do_bit17_swizzling) {
			slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length,
					      0);
		} else {
			slow_shmem_copy(obj_priv->pages[shmem_page_index],
					shmem_page_offset,
					user_pages[data_page_index],
					data_page_offset,
					page_length);
		}

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = to_intel_bo(obj);

	/* Bounds check destination.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference_unlocked(obj);
		return -EINVAL;
	}

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj_priv->phys_obj)
		ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
	else if (obj_priv->tiling_mode == I915_TILING_NONE &&
		 dev->gtt_total != 0 &&
		 obj->write_domain != I915_GEM_DOMAIN_CPU) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
		if (ret == -EFAULT) {
			ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
						       file_priv);
		}
	} else if (i915_gem_object_needs_bit17_swizzle(obj)) {
		ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
	} else {
		ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
		if (ret == -EFAULT) {
			ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
							 file_priv);
		}
	}

#if WATCH_PWRITE
	if (ret)
		DRM_INFO("pwrite failed %d\n", ret);
#endif

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
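
/*
 * Editorial summary of the dispatch above: phys objects always take
 * i915_gem_phys_pwrite(); untiled objects whose write domain has already
 * left the CPU go through the GTT (fast path, then slow on -EFAULT); tiled
 * objects on bit-17 swizzling hardware must take the swizzle-aware shmem
 * slow path; everything else tries the kmap_atomic shmem fast path first.
 * A hypothetical userspace caller mirrors the pread sketch earlier:
 *
 *	struct drm_i915_gem_pwrite pwrite = {
 *		.handle   = handle,
 *		.offset   = 0,
 *		.size     = sizeof(buf),
 *		.data_ptr = (uint64_t)(uintptr_t)buf,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
 */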
|  | 970 |  | 
|  | 971 | /** | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 972 | * Called when user space prepares to use an object with the CPU, either | 
|  | 973 | * through the mmap ioctl's mapping or a GTT mapping. | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 974 | */ | 
|  | 975 | int | 
|  | 976 | i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | 
|  | 977 | struct drm_file *file_priv) | 
|  | 978 | { | 
| Eric Anholt | a09ba7f | 2009-08-29 12:49:51 -0700 | [diff] [blame] | 979 | struct drm_i915_private *dev_priv = dev->dev_private; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 980 | struct drm_i915_gem_set_domain *args = data; | 
|  | 981 | struct drm_gem_object *obj; | 
| Jesse Barnes | 652c393 | 2009-08-17 13:31:43 -0700 | [diff] [blame] | 982 | struct drm_i915_gem_object *obj_priv; | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 983 | uint32_t read_domains = args->read_domains; | 
|  | 984 | uint32_t write_domain = args->write_domain; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 985 | int ret; | 
|  | 986 |  | 
|  | 987 | if (!(dev->driver->driver_features & DRIVER_GEM)) | 
|  | 988 | return -ENODEV; | 
|  | 989 |  | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 990 | /* Only handle setting domains to types used by the CPU. */ | 
| Chris Wilson | 21d509e | 2009-06-06 09:46:02 +0100 | [diff] [blame] | 991 | if (write_domain & I915_GEM_GPU_DOMAINS) | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 992 | return -EINVAL; | 
|  | 993 |  | 
| Chris Wilson | 21d509e | 2009-06-06 09:46:02 +0100 | [diff] [blame] | 994 | if (read_domains & I915_GEM_GPU_DOMAINS) | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 995 | return -EINVAL; | 
|  | 996 |  | 
|  | 997 | /* Having something in the write domain implies it's in the read | 
|  | 998 | * domain, and only that read domain.  Enforce that in the request. | 
|  | 999 | */ | 
|  | 1000 | if (write_domain != 0 && read_domains != write_domain) | 
|  | 1001 | return -EINVAL; | 
|  | 1002 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1003 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 
|  | 1004 | if (obj == NULL) | 
|  | 1005 | return -EBADF; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 1006 | obj_priv = to_intel_bo(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1007 |  | 
|  | 1008 | mutex_lock(&dev->struct_mutex); | 
| Jesse Barnes | 652c393 | 2009-08-17 13:31:43 -0700 | [diff] [blame] | 1009 |  | 
|  | 1010 | intel_mark_busy(dev, obj); | 
|  | 1011 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1012 | #if WATCH_BUF | 
| Krzysztof Halasa | cfd43c0 | 2009-06-20 00:31:28 +0200 | [diff] [blame] | 1013 | DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n", | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 1014 | obj, obj->size, read_domains, write_domain); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1015 | #endif | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 1016 | if (read_domains & I915_GEM_DOMAIN_GTT) { | 
|  | 1017 | ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); | 
| Eric Anholt | 0235439 | 2008-11-26 13:58:13 -0800 | [diff] [blame] | 1018 |  | 
| Eric Anholt | a09ba7f | 2009-08-29 12:49:51 -0700 | [diff] [blame] | 1019 | /* Update the LRU on the fence for the CPU access that's | 
|  | 1020 | * about to occur. | 
|  | 1021 | */ | 
|  | 1022 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { | 
| Daniel Vetter | 007cc8a | 2010-04-28 11:02:31 +0200 | [diff] [blame] | 1023 | struct drm_i915_fence_reg *reg = | 
|  | 1024 | &dev_priv->fence_regs[obj_priv->fence_reg]; | 
|  | 1025 | list_move_tail(&reg->lru_list, | 
| Eric Anholt | a09ba7f | 2009-08-29 12:49:51 -0700 | [diff] [blame] | 1026 | &dev_priv->mm.fence_list); | 
|  | 1027 | } | 
|  | 1028 |  | 
| Eric Anholt | 0235439 | 2008-11-26 13:58:13 -0800 | [diff] [blame] | 1029 | /* Silently promote "you're not bound, there was nothing to do" | 
|  | 1030 | * to success, since the client was just asking us to | 
|  | 1031 | * make sure everything was done. | 
|  | 1032 | */ | 
|  | 1033 | if (ret == -EINVAL) | 
|  | 1034 | ret = 0; | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 1035 | } else { | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 1036 | ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 1037 | } | 
|  | 1038 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1039 | drm_gem_object_unreference(obj); | 
|  | 1040 | mutex_unlock(&dev->struct_mutex); | 
|  | 1041 | return ret; | 
|  | 1042 | } | 
|  | 1043 |  | 
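|  |  | /* | 
|  |  | * Illustrative userspace counterpart (a sketch, not part of this file): | 
|  |  | * before touching a CPU mmap of an object, a libdrm-based client would | 
|  |  | * typically flip the object into the CPU domain; "fd" and "handle" are | 
|  |  | * assumed to exist in the caller: | 
|  |  | * | 
|  |  | *	struct drm_i915_gem_set_domain set_domain = { | 
|  |  | *		.handle = handle, | 
|  |  | *		.read_domains = I915_GEM_DOMAIN_CPU, | 
|  |  | *		.write_domain = I915_GEM_DOMAIN_CPU, | 
|  |  | *	}; | 
|  |  | *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain); | 
|  |  | */ | 
|  |  |  | 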
|  | 1044 | /** | 
|  | 1045 | * Called when user space has done writes to this buffer | 
|  | 1046 | */ | 
|  | 1047 | int | 
|  | 1048 | i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, | 
|  | 1049 | struct drm_file *file_priv) | 
|  | 1050 | { | 
|  | 1051 | struct drm_i915_gem_sw_finish *args = data; | 
|  | 1052 | struct drm_gem_object *obj; | 
|  | 1053 | struct drm_i915_gem_object *obj_priv; | 
|  | 1054 | int ret = 0; | 
|  | 1055 |  | 
|  | 1056 | if (!(dev->driver->driver_features & DRIVER_GEM)) | 
|  | 1057 | return -ENODEV; | 
|  | 1058 |  | 
|  | 1059 | mutex_lock(&dev->struct_mutex); | 
|  | 1060 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 
|  | 1061 | if (obj == NULL) { | 
|  | 1062 | mutex_unlock(&dev->struct_mutex); | 
|  | 1063 | return -EBADF; | 
|  | 1064 | } | 
|  | 1065 |  | 
|  | 1066 | #if WATCH_BUF | 
| Krzysztof Halasa | cfd43c0 | 2009-06-20 00:31:28 +0200 | [diff] [blame] | 1067 | DRM_INFO("%s: sw_finish %d (%p %zd)\n", | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1068 | __func__, args->handle, obj, obj->size); | 
|  | 1069 | #endif | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 1070 | obj_priv = to_intel_bo(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1071 |  | 
|  | 1072 | /* Pinned buffers may be scanout, so flush the cache */ | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 1073 | if (obj_priv->pin_count) | 
|  | 1074 | i915_gem_object_flush_cpu_write_domain(obj); | 
|  | 1075 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1076 | drm_gem_object_unreference(obj); | 
|  | 1077 | mutex_unlock(&dev->struct_mutex); | 
|  | 1078 | return ret; | 
|  | 1079 | } | 
|  | 1080 |  | 
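|  |  | /* | 
|  |  | * Sketch of the matching userspace call (libdrm-style names assumed): | 
|  |  | * after writing through a CPU mmap, a client reports that it is done so | 
|  |  | * a pinned scanout buffer gets its CPU writes flushed: | 
|  |  | * | 
|  |  | *	struct drm_i915_gem_sw_finish sw_fin = { .handle = handle }; | 
|  |  | *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SW_FINISH, &sw_fin); | 
|  |  | */ | 
|  |  |  | 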
|  | 1081 | /** | 
|  | 1082 | * Maps the contents of an object, returning the address it is mapped | 
|  | 1083 | * into. | 
|  | 1084 | * | 
|  | 1085 | * While the mapping holds a reference on the contents of the object, it doesn't | 
|  | 1086 | * imply a ref on the object itself. | 
|  | 1087 | */ | 
|  | 1088 | int | 
|  | 1089 | i915_gem_mmap_ioctl(struct drm_device *dev, void *data, | 
|  | 1090 | struct drm_file *file_priv) | 
|  | 1091 | { | 
|  | 1092 | struct drm_i915_gem_mmap *args = data; | 
|  | 1093 | struct drm_gem_object *obj; | 
|  | 1094 | loff_t offset; | 
|  | 1095 | unsigned long addr; | 
|  | 1096 |  | 
|  | 1097 | if (!(dev->driver->driver_features & DRIVER_GEM)) | 
|  | 1098 | return -ENODEV; | 
|  | 1099 |  | 
|  | 1100 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 
|  | 1101 | if (obj == NULL) | 
|  | 1102 | return -EBADF; | 
|  | 1103 |  | 
|  | 1104 | offset = args->offset; | 
|  | 1105 |  | 
|  | 1106 | down_write(&current->mm->mmap_sem); | 
|  | 1107 | addr = do_mmap(obj->filp, 0, args->size, | 
|  | 1108 | PROT_READ | PROT_WRITE, MAP_SHARED, | 
|  | 1109 | args->offset); | 
|  | 1110 | up_write(&current->mm->mmap_sem); | 
| Luca Barbieri | bc9025b | 2010-02-09 05:49:12 +0000 | [diff] [blame] | 1111 | drm_gem_object_unreference_unlocked(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1112 | if (IS_ERR((void *)addr)) | 
|  | 1113 | return addr; | 
|  | 1114 |  | 
|  | 1115 | args->addr_ptr = (uint64_t) addr; | 
|  | 1116 |  | 
|  | 1117 | return 0; | 
|  | 1118 | } | 
|  | 1119 |  | 
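|  |  | /* | 
|  |  | * Illustrative caller (sketch only; "fd", "handle" and "size" are | 
|  |  | * assumed): the new CPU address comes back through addr_ptr: | 
|  |  | * | 
|  |  | *	struct drm_i915_gem_mmap mmap_arg = { | 
|  |  | *		.handle = handle, | 
|  |  | *		.size = size, | 
|  |  | *	}; | 
|  |  | *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg) == 0) | 
|  |  | *		ptr = (void *)(uintptr_t)mmap_arg.addr_ptr; | 
|  |  | */ | 
|  |  |  | 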
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1120 | /** | 
|  | 1121 | * i915_gem_fault - fault a page into the GTT | 
|  | 1122 | * @vma: VMA in question | 
|  | 1123 | * @vmf: fault info | 
|  | 1124 | * | 
|  | 1125 | * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped | 
|  | 1126 | * from userspace.  The fault handler takes care of binding the object to | 
|  | 1127 | * the GTT (if needed), allocating and programming a fence register (again, | 
|  | 1128 | * only if needed based on whether the old reg is still valid or the object | 
|  | 1129 | * is tiled), and inserting a new PTE into the faulting process's address space. | 
|  | 1130 | * | 
|  | 1131 | * Note that the faulting process may involve evicting existing objects | 
|  | 1132 | * from the GTT and/or fence registers to make room.  So performance may | 
|  | 1133 | * suffer if the GTT working set is large or there are few fence registers | 
|  | 1134 | * left. | 
|  | 1135 | */ | 
|  | 1136 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 
|  | 1137 | { | 
|  | 1138 | struct drm_gem_object *obj = vma->vm_private_data; | 
|  | 1139 | struct drm_device *dev = obj->dev; | 
|  | 1140 | struct drm_i915_private *dev_priv = dev->dev_private; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 1141 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1142 | pgoff_t page_offset; | 
|  | 1143 | unsigned long pfn; | 
|  | 1144 | int ret = 0; | 
| Jesse Barnes | 0f973f2 | 2009-01-26 17:10:45 -0800 | [diff] [blame] | 1145 | bool write = !!(vmf->flags & FAULT_FLAG_WRITE); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1146 |  | 
|  | 1147 | /* We don't use vmf->pgoff since that has the fake offset */ | 
|  | 1148 | page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> | 
|  | 1149 | PAGE_SHIFT; | 
|  | 1150 |  | 
|  | 1151 | /* Now bind it into the GTT if needed */ | 
|  | 1152 | mutex_lock(&dev->struct_mutex); | 
|  | 1153 | if (!obj_priv->gtt_space) { | 
| Chris Wilson | e67b8ce | 2009-09-14 16:50:26 +0100 | [diff] [blame] | 1154 | ret = i915_gem_object_bind_to_gtt(obj, 0); | 
| Chris Wilson | c715089 | 2009-09-23 00:43:56 +0100 | [diff] [blame] | 1155 | if (ret) | 
|  | 1156 | goto unlock; | 
| Kristian Høgsberg | 07f4f3e | 2009-05-27 14:37:28 -0400 | [diff] [blame] | 1157 |  | 
| Jesse Barnes | 14b6039 | 2009-05-20 16:47:08 -0400 | [diff] [blame] | 1158 | list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1159 |  | 
|  | 1160 | ret = i915_gem_object_set_to_gtt_domain(obj, write); | 
| Chris Wilson | c715089 | 2009-09-23 00:43:56 +0100 | [diff] [blame] | 1161 | if (ret) | 
|  | 1162 | goto unlock; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1163 | } | 
|  | 1164 |  | 
|  | 1165 | /* Need a new fence register? */ | 
| Eric Anholt | a09ba7f | 2009-08-29 12:49:51 -0700 | [diff] [blame] | 1166 | if (obj_priv->tiling_mode != I915_TILING_NONE) { | 
| Chris Wilson | 8c4b8c3 | 2009-06-17 22:08:52 +0100 | [diff] [blame] | 1167 | ret = i915_gem_object_get_fence_reg(obj); | 
| Chris Wilson | c715089 | 2009-09-23 00:43:56 +0100 | [diff] [blame] | 1168 | if (ret) | 
|  | 1169 | goto unlock; | 
| Eric Anholt | d9ddcb9 | 2009-01-27 10:33:49 -0800 | [diff] [blame] | 1170 | } | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1171 |  | 
|  | 1172 | pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) + | 
|  | 1173 | page_offset; | 
|  | 1174 |  | 
|  | 1175 | /* Finally, remap it using the new GTT offset */ | 
|  | 1176 | ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); | 
| Chris Wilson | c715089 | 2009-09-23 00:43:56 +0100 | [diff] [blame] | 1177 | unlock: | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1178 | mutex_unlock(&dev->struct_mutex); | 
|  | 1179 |  | 
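|  |  | /* Map the GEM error back onto a VM fault code; -ERESTARTSYS means a | 
|  |  | * signal interrupted the wait, so report NOPAGE and let the kernel | 
|  |  | * retry the fault once the signal has been handled. | 
|  |  | */ | 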
|  | 1180 | switch (ret) { | 
| Chris Wilson | c715089 | 2009-09-23 00:43:56 +0100 | [diff] [blame] | 1181 | case 0: | 
|  | 1182 | case -ERESTARTSYS: | 
|  | 1183 | return VM_FAULT_NOPAGE; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1184 | case -ENOMEM: | 
|  | 1185 | case -EAGAIN: | 
|  | 1186 | return VM_FAULT_OOM; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1187 | default: | 
| Chris Wilson | c715089 | 2009-09-23 00:43:56 +0100 | [diff] [blame] | 1188 | return VM_FAULT_SIGBUS; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1189 | } | 
|  | 1190 | } | 
|  | 1191 |  | 
|  | 1192 | /** | 
|  | 1193 | * i915_gem_create_mmap_offset - create a fake mmap offset for an object | 
|  | 1194 | * @obj: obj in question | 
|  | 1195 | * | 
|  | 1196 | * GEM memory mapping works by handing back to userspace a fake mmap offset | 
|  | 1197 | * it can use in a subsequent mmap(2) call.  The DRM core code then looks | 
|  | 1198 | * up the object based on the offset and sets up the various memory mapping | 
|  | 1199 | * structures. | 
|  | 1200 | * | 
|  | 1201 | * This routine allocates and attaches a fake offset for @obj. | 
|  | 1202 | */ | 
|  | 1203 | static int | 
|  | 1204 | i915_gem_create_mmap_offset(struct drm_gem_object *obj) | 
|  | 1205 | { | 
|  | 1206 | struct drm_device *dev = obj->dev; | 
|  | 1207 | struct drm_gem_mm *mm = dev->mm_private; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 1208 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1209 | struct drm_map_list *list; | 
| Benjamin Herrenschmidt | f77d390 | 2009-02-02 16:55:46 +1100 | [diff] [blame] | 1210 | struct drm_local_map *map; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1211 | int ret = 0; | 
|  | 1212 |  | 
|  | 1213 | /* Set the object up for mmap'ing */ | 
|  | 1214 | list = &obj->map_list; | 
| Eric Anholt | 9a298b2 | 2009-03-24 12:23:04 -0700 | [diff] [blame] | 1215 | list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1216 | if (!list->map) | 
|  | 1217 | return -ENOMEM; | 
|  | 1218 |  | 
|  | 1219 | map = list->map; | 
|  | 1220 | map->type = _DRM_GEM; | 
|  | 1221 | map->size = obj->size; | 
|  | 1222 | map->handle = obj; | 
|  | 1223 |  | 
|  | 1224 | /* Get a DRM GEM mmap offset allocated... */ | 
|  | 1225 | list->file_offset_node = drm_mm_search_free(&mm->offset_manager, | 
|  | 1226 | obj->size / PAGE_SIZE, 0, 0); | 
|  | 1227 | if (!list->file_offset_node) { | 
|  | 1228 | DRM_ERROR("failed to allocate offset for bo %d\n", obj->name); | 
|  | 1229 | ret = -ENOMEM; | 
|  | 1230 | goto out_free_list; | 
|  | 1231 | } | 
|  | 1232 |  | 
|  | 1233 | list->file_offset_node = drm_mm_get_block(list->file_offset_node, | 
|  | 1234 | obj->size / PAGE_SIZE, 0); | 
|  | 1235 | if (!list->file_offset_node) { | 
|  | 1236 | ret = -ENOMEM; | 
|  | 1237 | goto out_free_list; | 
|  | 1238 | } | 
|  | 1239 |  | 
|  | 1240 | list->hash.key = list->file_offset_node->start; | 
|  | 1241 | if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) { | 
|  | 1242 | DRM_ERROR("failed to add to map hash\n"); | 
| Chris Wilson | 5618ca6 | 2009-12-02 15:15:30 +0000 | [diff] [blame] | 1243 | ret = -ENOMEM; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1244 | goto out_free_mm; | 
|  | 1245 | } | 
|  | 1246 |  | 
|  | 1247 | /* By now we should be all set; any drm_mmap request on the offset | 
|  | 1248 | * below will get to our mmap & fault handler. */ | 
|  | 1249 | obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT; | 
|  | 1250 |  | 
|  | 1251 | return 0; | 
|  | 1252 |  | 
|  | 1253 | out_free_mm: | 
|  | 1254 | drm_mm_put_block(list->file_offset_node); | 
|  | 1255 | out_free_list: | 
| Eric Anholt | 9a298b2 | 2009-03-24 12:23:04 -0700 | [diff] [blame] | 1256 | kfree(list->map); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1257 |  | 
|  | 1258 | return ret; | 
|  | 1259 | } | 
|  | 1260 |  | 
| Chris Wilson | 901782b | 2009-07-10 08:18:50 +0100 | [diff] [blame] | 1261 | /** | 
|  | 1262 | * i915_gem_release_mmap - remove physical page mappings | 
|  | 1263 | * @obj: obj in question | 
|  | 1264 | * | 
| André Goddard Rosa | af901ca | 2009-11-14 13:09:05 -0200 | [diff] [blame] | 1265 | * Preserve the reservation of the mmap offset with the DRM core code, but | 
| Chris Wilson | 901782b | 2009-07-10 08:18:50 +0100 | [diff] [blame] | 1266 | * relinquish ownership of the pages back to the system. | 
|  | 1267 | * | 
|  | 1268 | * It is vital that we remove the page mapping if we have mapped a tiled | 
|  | 1269 | * object through the GTT and then lose the fence register due to | 
|  | 1270 | * resource pressure. Similarly if the object has been moved out of the | 
|  | 1271 | * aperture, then pages mapped into userspace must be revoked. Removing the | 
|  | 1272 | * mapping will then trigger a page fault on the next user access, allowing | 
|  | 1273 | * fixup by i915_gem_fault(). | 
|  | 1274 | */ | 
| Eric Anholt | d05ca30 | 2009-07-10 13:02:26 -0700 | [diff] [blame] | 1275 | void | 
| Chris Wilson | 901782b | 2009-07-10 08:18:50 +0100 | [diff] [blame] | 1276 | i915_gem_release_mmap(struct drm_gem_object *obj) | 
|  | 1277 | { | 
|  | 1278 | struct drm_device *dev = obj->dev; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 1279 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Chris Wilson | 901782b | 2009-07-10 08:18:50 +0100 | [diff] [blame] | 1280 |  | 
|  | 1281 | if (dev->dev_mapping) | 
|  | 1282 | unmap_mapping_range(dev->dev_mapping, | 
|  | 1283 | obj_priv->mmap_offset, obj->size, 1); | 
|  | 1284 | } | 
|  | 1285 |  | 
| Jesse Barnes | ab00b3e | 2009-02-11 14:01:46 -0800 | [diff] [blame] | 1286 | static void | 
|  | 1287 | i915_gem_free_mmap_offset(struct drm_gem_object *obj) | 
|  | 1288 | { | 
|  | 1289 | struct drm_device *dev = obj->dev; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 1290 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Jesse Barnes | ab00b3e | 2009-02-11 14:01:46 -0800 | [diff] [blame] | 1291 | struct drm_gem_mm *mm = dev->mm_private; | 
|  | 1292 | struct drm_map_list *list; | 
|  | 1293 |  | 
|  | 1294 | list = &obj->map_list; | 
|  | 1295 | drm_ht_remove_item(&mm->offset_hash, &list->hash); | 
|  | 1296 |  | 
|  | 1297 | if (list->file_offset_node) { | 
|  | 1298 | drm_mm_put_block(list->file_offset_node); | 
|  | 1299 | list->file_offset_node = NULL; | 
|  | 1300 | } | 
|  | 1301 |  | 
|  | 1302 | if (list->map) { | 
| Eric Anholt | 9a298b2 | 2009-03-24 12:23:04 -0700 | [diff] [blame] | 1303 | kfree(list->map); | 
| Jesse Barnes | ab00b3e | 2009-02-11 14:01:46 -0800 | [diff] [blame] | 1304 | list->map = NULL; | 
|  | 1305 | } | 
|  | 1306 |  | 
|  | 1307 | obj_priv->mmap_offset = 0; | 
|  | 1308 | } | 
|  | 1309 |  | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1310 | /** | 
|  | 1311 | * i915_gem_get_gtt_alignment - return required GTT alignment for an object | 
|  | 1312 | * @obj: object to check | 
|  | 1313 | * | 
|  | 1314 | * Return the required GTT alignment for an object, taking into account | 
|  | 1315 | * potential fence register mapping if needed. | 
|  | 1316 | */ | 
|  | 1317 | static uint32_t | 
|  | 1318 | i915_gem_get_gtt_alignment(struct drm_gem_object *obj) | 
|  | 1319 | { | 
|  | 1320 | struct drm_device *dev = obj->dev; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 1321 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1322 | int start, i; | 
|  | 1323 |  | 
|  | 1324 | /* | 
|  | 1325 | * Minimum alignment is 4k (GTT page size), but might be greater | 
|  | 1326 | * if a fence register is needed for the object. | 
|  | 1327 | */ | 
|  | 1328 | if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE) | 
|  | 1329 | return 4096; | 
|  | 1330 |  | 
|  | 1331 | /* | 
|  | 1332 | * Previous chips need to be aligned to the size of the smallest | 
|  | 1333 | * fence register that can contain the object. | 
|  | 1334 | */ | 
|  | 1335 | if (IS_I9XX(dev)) | 
|  | 1336 | start = 1024*1024; | 
|  | 1337 | else | 
|  | 1338 | start = 512*1024; | 
|  | 1339 |  | 
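|  |  |  | 
|  |  | /* Round start up to the next power of two at or above the object size; | 
|  |  | * e.g. a 700KiB object with start = 512KiB ends up aligned to 1MiB, the | 
|  |  | * smallest fence size that can contain it. | 
|  |  | */ | 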
|  | 1340 | for (i = start; i < obj->size; i <<= 1) | 
|  | 1341 | ; | 
|  | 1342 |  | 
|  | 1343 | return i; | 
|  | 1344 | } | 
|  | 1345 |  | 
|  | 1346 | /** | 
|  | 1347 | * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing | 
|  | 1348 | * @dev: DRM device | 
|  | 1349 | * @data: GTT mapping ioctl data | 
|  | 1350 | * @file_priv: GEM object info | 
|  | 1351 | * | 
|  | 1352 | * Simply returns the fake offset to userspace so it can mmap it. | 
|  | 1353 | * The mmap call will end up in drm_gem_mmap(), which will set things | 
|  | 1354 | * up so we can get faults in the handler above. | 
|  | 1355 | * | 
|  | 1356 | * The fault handler will take care of binding the object into the GTT | 
|  | 1357 | * (since it may have been evicted to make room for something), allocating | 
|  | 1358 | * a fence register, and mapping the appropriate aperture address into | 
|  | 1359 | * userspace. | 
|  | 1360 | */ | 
|  | 1361 | int | 
|  | 1362 | i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | 
|  | 1363 | struct drm_file *file_priv) | 
|  | 1364 | { | 
|  | 1365 | struct drm_i915_gem_mmap_gtt *args = data; | 
|  | 1366 | struct drm_i915_private *dev_priv = dev->dev_private; | 
|  | 1367 | struct drm_gem_object *obj; | 
|  | 1368 | struct drm_i915_gem_object *obj_priv; | 
|  | 1369 | int ret; | 
|  | 1370 |  | 
|  | 1371 | if (!(dev->driver->driver_features & DRIVER_GEM)) | 
|  | 1372 | return -ENODEV; | 
|  | 1373 |  | 
|  | 1374 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 
|  | 1375 | if (obj == NULL) | 
|  | 1376 | return -EBADF; | 
|  | 1377 |  | 
|  | 1378 | mutex_lock(&dev->struct_mutex); | 
|  | 1379 |  | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 1380 | obj_priv = to_intel_bo(obj); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1381 |  | 
| Chris Wilson | ab18282 | 2009-09-22 18:46:17 +0100 | [diff] [blame] | 1382 | if (obj_priv->madv != I915_MADV_WILLNEED) { | 
|  | 1383 | DRM_ERROR("Attempting to mmap a purgeable buffer\n"); | 
|  | 1384 | drm_gem_object_unreference(obj); | 
|  | 1385 | mutex_unlock(&dev->struct_mutex); | 
|  | 1386 | return -EINVAL; | 
|  | 1387 | } | 
|  | 1388 |  | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1390 | if (!obj_priv->mmap_offset) { | 
|  | 1391 | ret = i915_gem_create_mmap_offset(obj); | 
| Chris Wilson | 13af106 | 2009-02-11 14:26:31 +0000 | [diff] [blame] | 1392 | if (ret) { | 
|  | 1393 | drm_gem_object_unreference(obj); | 
|  | 1394 | mutex_unlock(&dev->struct_mutex); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1395 | return ret; | 
| Chris Wilson | 13af106 | 2009-02-11 14:26:31 +0000 | [diff] [blame] | 1396 | } | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1397 | } | 
|  | 1398 |  | 
|  | 1399 | args->offset = obj_priv->mmap_offset; | 
|  | 1400 |  | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1401 | /* | 
|  | 1402 | * Pull it into the GTT so that we have a page list (makes the | 
|  | 1403 | * initial fault faster and any subsequent flushing possible). | 
|  | 1404 | */ | 
|  | 1405 | if (!obj_priv->agp_mem) { | 
| Chris Wilson | e67b8ce | 2009-09-14 16:50:26 +0100 | [diff] [blame] | 1406 | ret = i915_gem_object_bind_to_gtt(obj, 0); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1407 | if (ret) { | 
|  | 1408 | drm_gem_object_unreference(obj); | 
|  | 1409 | mutex_unlock(&dev->struct_mutex); | 
|  | 1410 | return ret; | 
|  | 1411 | } | 
| Jesse Barnes | 14b6039 | 2009-05-20 16:47:08 -0400 | [diff] [blame] | 1412 | list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1413 | } | 
|  | 1414 |  | 
|  | 1415 | drm_gem_object_unreference(obj); | 
|  | 1416 | mutex_unlock(&dev->struct_mutex); | 
|  | 1417 |  | 
|  | 1418 | return 0; | 
|  | 1419 | } | 
|  | 1420 |  | 
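|  |  | /* | 
|  |  | * Sketch of the userspace side (names assumed): fetch the fake offset, | 
|  |  | * then mmap the DRM fd at that offset to reach the object through the | 
|  |  | * GTT aperture: | 
|  |  | * | 
|  |  | *	struct drm_i915_gem_mmap_gtt gtt_arg = { .handle = handle }; | 
|  |  | *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &gtt_arg); | 
|  |  | *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, | 
|  |  | *		   MAP_SHARED, fd, gtt_arg.offset); | 
|  |  | */ | 
|  |  |  | 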
| Ben Gamari | 6911a9b | 2009-04-02 11:24:54 -0700 | [diff] [blame] | 1421 | void | 
| Eric Anholt | 856fa19 | 2009-03-19 14:10:50 -0700 | [diff] [blame] | 1422 | i915_gem_object_put_pages(struct drm_gem_object *obj) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1423 | { | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 1424 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1425 | int page_count = obj->size / PAGE_SIZE; | 
|  | 1426 | int i; | 
|  | 1427 |  | 
| Eric Anholt | 856fa19 | 2009-03-19 14:10:50 -0700 | [diff] [blame] | 1428 | BUG_ON(obj_priv->pages_refcount == 0); | 
| Chris Wilson | bb6baf7 | 2009-09-22 14:24:13 +0100 | [diff] [blame] | 1429 | BUG_ON(obj_priv->madv == __I915_MADV_PURGED); | 
| Eric Anholt | 856fa19 | 2009-03-19 14:10:50 -0700 | [diff] [blame] | 1430 |  | 
|  | 1431 | if (--obj_priv->pages_refcount != 0) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1432 | return; | 
|  | 1433 |  | 
| Eric Anholt | 280b713 | 2009-03-12 16:56:27 -0700 | [diff] [blame] | 1434 | if (obj_priv->tiling_mode != I915_TILING_NONE) | 
|  | 1435 | i915_gem_object_save_bit_17_swizzle(obj); | 
|  | 1436 |  | 
| Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 1437 | if (obj_priv->madv == I915_MADV_DONTNEED) | 
| Chris Wilson | 13a05fd | 2009-09-20 23:03:19 +0100 | [diff] [blame] | 1438 | obj_priv->dirty = 0; | 
| Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 1439 |  | 
|  | 1440 | for (i = 0; i < page_count; i++) { | 
| Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 1441 | if (obj_priv->dirty) | 
|  | 1442 | set_page_dirty(obj_priv->pages[i]); | 
|  | 1443 |  | 
|  | 1444 | if (obj_priv->madv == I915_MADV_WILLNEED) | 
| Eric Anholt | 856fa19 | 2009-03-19 14:10:50 -0700 | [diff] [blame] | 1445 | mark_page_accessed(obj_priv->pages[i]); | 
| Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 1446 |  | 
|  | 1447 | page_cache_release(obj_priv->pages[i]); | 
|  | 1448 | } | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1449 | obj_priv->dirty = 0; | 
|  | 1450 |  | 
| Jesse Barnes | 8e7d2b2 | 2009-05-08 16:13:25 -0700 | [diff] [blame] | 1451 | drm_free_large(obj_priv->pages); | 
| Eric Anholt | 856fa19 | 2009-03-19 14:10:50 -0700 | [diff] [blame] | 1452 | obj_priv->pages = NULL; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1453 | } | 
|  | 1454 |  | 
|  | 1455 | static void | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1456 | i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno, | 
|  | 1457 | struct intel_ring_buffer *ring) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1458 | { | 
|  | 1459 | struct drm_device *dev = obj->dev; | 
|  | 1460 | drm_i915_private_t *dev_priv = dev->dev_private; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 1461 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1462 | BUG_ON(ring == NULL); | 
|  | 1463 | obj_priv->ring = ring; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1464 |  | 
|  | 1465 | /* Add a reference if we're newly entering the active list. */ | 
|  | 1466 | if (!obj_priv->active) { | 
|  | 1467 | drm_gem_object_reference(obj); | 
|  | 1468 | obj_priv->active = 1; | 
|  | 1469 | } | 
|  | 1470 | /* Move from whatever list we were on to the tail of execution. */ | 
| Carl Worth | 5e118f4 | 2009-03-20 11:54:25 -0700 | [diff] [blame] | 1471 | spin_lock(&dev_priv->mm.active_list_lock); | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1472 | list_move_tail(&obj_priv->list, &ring->active_list); | 
| Carl Worth | 5e118f4 | 2009-03-20 11:54:25 -0700 | [diff] [blame] | 1473 | spin_unlock(&dev_priv->mm.active_list_lock); | 
| Eric Anholt | ce44b0e | 2008-11-06 16:00:31 -0800 | [diff] [blame] | 1474 | obj_priv->last_rendering_seqno = seqno; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1475 | } | 
|  | 1476 |  | 
| Eric Anholt | ce44b0e | 2008-11-06 16:00:31 -0800 | [diff] [blame] | 1477 | static void | 
|  | 1478 | i915_gem_object_move_to_flushing(struct drm_gem_object *obj) | 
|  | 1479 | { | 
|  | 1480 | struct drm_device *dev = obj->dev; | 
|  | 1481 | drm_i915_private_t *dev_priv = dev->dev_private; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 1482 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Eric Anholt | ce44b0e | 2008-11-06 16:00:31 -0800 | [diff] [blame] | 1483 |  | 
|  | 1484 | BUG_ON(!obj_priv->active); | 
|  | 1485 | list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list); | 
|  | 1486 | obj_priv->last_rendering_seqno = 0; | 
|  | 1487 | } | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1488 |  | 
| Chris Wilson | 963b483 | 2009-09-20 23:03:54 +0100 | [diff] [blame] | 1489 | /* Immediately discard the backing storage */ | 
|  | 1490 | static void | 
|  | 1491 | i915_gem_object_truncate(struct drm_gem_object *obj) | 
|  | 1492 | { | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 1493 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Chris Wilson | bb6baf7 | 2009-09-22 14:24:13 +0100 | [diff] [blame] | 1494 | struct inode *inode; | 
| Chris Wilson | 963b483 | 2009-09-20 23:03:54 +0100 | [diff] [blame] | 1495 |  | 
| Chris Wilson | bb6baf7 | 2009-09-22 14:24:13 +0100 | [diff] [blame] | 1496 | inode = obj->filp->f_path.dentry->d_inode; | 
|  | 1497 | if (inode->i_op->truncate) | 
|  | 1498 | inode->i_op->truncate(inode); | 
|  | 1499 |  | 
|  | 1500 | obj_priv->madv = __I915_MADV_PURGED; | 
| Chris Wilson | 963b483 | 2009-09-20 23:03:54 +0100 | [diff] [blame] | 1501 | } | 
|  | 1502 |  | 
|  | 1503 | static inline int | 
|  | 1504 | i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv) | 
|  | 1505 | { | 
|  | 1506 | return obj_priv->madv == I915_MADV_DONTNEED; | 
|  | 1507 | } | 
|  | 1508 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1509 | static void | 
|  | 1510 | i915_gem_object_move_to_inactive(struct drm_gem_object *obj) | 
|  | 1511 | { | 
|  | 1512 | struct drm_device *dev = obj->dev; | 
|  | 1513 | drm_i915_private_t *dev_priv = dev->dev_private; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 1514 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1515 |  | 
|  | 1516 | i915_verify_inactive(dev, __FILE__, __LINE__); | 
|  | 1517 | if (obj_priv->pin_count != 0) | 
|  | 1518 | list_del_init(&obj_priv->list); | 
|  | 1519 | else | 
|  | 1520 | list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); | 
|  | 1521 |  | 
| Daniel Vetter | 99fcb76 | 2010-02-07 16:20:18 +0100 | [diff] [blame] | 1522 | BUG_ON(!list_empty(&obj_priv->gpu_write_list)); | 
|  | 1523 |  | 
| Eric Anholt | ce44b0e | 2008-11-06 16:00:31 -0800 | [diff] [blame] | 1524 | obj_priv->last_rendering_seqno = 0; | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1525 | obj_priv->ring = NULL; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1526 | if (obj_priv->active) { | 
|  | 1527 | obj_priv->active = 0; | 
|  | 1528 | drm_gem_object_unreference(obj); | 
|  | 1529 | } | 
|  | 1530 | i915_verify_inactive(dev, __FILE__, __LINE__); | 
|  | 1531 | } | 
|  | 1532 |  | 
| Daniel Vetter | 6356039 | 2010-02-19 11:51:59 +0100 | [diff] [blame] | 1533 | static void | 
|  | 1534 | i915_gem_process_flushing_list(struct drm_device *dev, | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1535 | uint32_t flush_domains, uint32_t seqno, | 
|  | 1536 | struct intel_ring_buffer *ring) | 
| Daniel Vetter | 6356039 | 2010-02-19 11:51:59 +0100 | [diff] [blame] | 1537 | { | 
|  | 1538 | drm_i915_private_t *dev_priv = dev->dev_private; | 
|  | 1539 | struct drm_i915_gem_object *obj_priv, *next; | 
|  | 1540 |  | 
|  | 1541 | list_for_each_entry_safe(obj_priv, next, | 
|  | 1542 | &dev_priv->mm.gpu_write_list, | 
|  | 1543 | gpu_write_list) { | 
| Daniel Vetter | a8089e8 | 2010-04-09 19:05:09 +0000 | [diff] [blame] | 1544 | struct drm_gem_object *obj = &obj_priv->base; | 
| Daniel Vetter | 6356039 | 2010-02-19 11:51:59 +0100 | [diff] [blame] | 1545 |  | 
|  | 1546 | if ((obj->write_domain & flush_domains) == | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1547 | obj->write_domain && | 
|  | 1548 | obj_priv->ring->ring_flag == ring->ring_flag) { | 
| Daniel Vetter | 6356039 | 2010-02-19 11:51:59 +0100 | [diff] [blame] | 1549 | uint32_t old_write_domain = obj->write_domain; | 
|  | 1550 |  | 
|  | 1551 | obj->write_domain = 0; | 
|  | 1552 | list_del_init(&obj_priv->gpu_write_list); | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1553 | i915_gem_object_move_to_active(obj, seqno, ring); | 
| Daniel Vetter | 6356039 | 2010-02-19 11:51:59 +0100 | [diff] [blame] | 1554 |  | 
|  | 1555 | /* update the fence lru list */ | 
| Daniel Vetter | 007cc8a | 2010-04-28 11:02:31 +0200 | [diff] [blame] | 1556 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { | 
|  | 1557 | struct drm_i915_fence_reg *reg = | 
|  | 1558 | &dev_priv->fence_regs[obj_priv->fence_reg]; | 
|  | 1559 | list_move_tail(&reg->lru_list, | 
| Daniel Vetter | 6356039 | 2010-02-19 11:51:59 +0100 | [diff] [blame] | 1560 | &dev_priv->mm.fence_list); | 
| Daniel Vetter | 007cc8a | 2010-04-28 11:02:31 +0200 | [diff] [blame] | 1561 | } | 
| Daniel Vetter | 6356039 | 2010-02-19 11:51:59 +0100 | [diff] [blame] | 1562 |  | 
|  | 1563 | trace_i915_gem_object_change_domain(obj, | 
|  | 1564 | obj->read_domains, | 
|  | 1565 | old_write_domain); | 
|  | 1566 | } | 
|  | 1567 | } | 
|  | 1568 | } | 
| Zou Nan hai | 8187a2b | 2010-05-21 09:08:55 +0800 | [diff] [blame] | 1569 |  | 
| Daniel Vetter | 5a5a0c6 | 2009-09-15 22:57:36 +0200 | [diff] [blame] | 1570 | uint32_t | 
| Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 1571 | i915_add_request(struct drm_device *dev, struct drm_file *file_priv, | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1572 | uint32_t flush_domains, struct intel_ring_buffer *ring) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1573 | { | 
|  | 1574 | drm_i915_private_t *dev_priv = dev->dev_private; | 
| Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 1575 | struct drm_i915_file_private *i915_file_priv = NULL; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1576 | struct drm_i915_gem_request *request; | 
|  | 1577 | uint32_t seqno; | 
|  | 1578 | int was_empty; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1579 |  | 
| Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 1580 | if (file_priv != NULL) | 
|  | 1581 | i915_file_priv = file_priv->driver_priv; | 
|  | 1582 |  | 
| Eric Anholt | 9a298b2 | 2009-03-24 12:23:04 -0700 | [diff] [blame] | 1583 | request = kzalloc(sizeof(*request), GFP_KERNEL); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1584 | if (request == NULL) | 
|  | 1585 | return 0; | 
|  | 1586 |  | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1587 | seqno = ring->add_request(dev, ring, file_priv, flush_domains); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1588 |  | 
|  | 1589 | request->seqno = seqno; | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1590 | request->ring = ring; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1591 | request->emitted_jiffies = jiffies; | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1592 | was_empty = list_empty(&ring->request_list); | 
|  | 1593 | list_add_tail(&request->list, &ring->request_list); | 
|  | 1594 |  | 
| Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 1595 | if (i915_file_priv) { | 
|  | 1596 | list_add_tail(&request->client_list, | 
|  | 1597 | &i915_file_priv->mm.request_list); | 
|  | 1598 | } else { | 
|  | 1599 | INIT_LIST_HEAD(&request->client_list); | 
|  | 1600 | } | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1601 |  | 
| Eric Anholt | ce44b0e | 2008-11-06 16:00:31 -0800 | [diff] [blame] | 1602 | /* Associate any objects on the flushing list matching the write | 
|  | 1603 | * domain we're flushing with our flush. | 
|  | 1604 | */ | 
| Daniel Vetter | 6356039 | 2010-02-19 11:51:59 +0100 | [diff] [blame] | 1605 | if (flush_domains != 0) | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1606 | i915_gem_process_flushing_list(dev, flush_domains, seqno, ring); | 
| Eric Anholt | ce44b0e | 2008-11-06 16:00:31 -0800 | [diff] [blame] | 1607 |  | 
| Ben Gamari | f65d942 | 2009-09-14 17:48:44 -0400 | [diff] [blame] | 1608 | if (!dev_priv->mm.suspended) { | 
|  | 1609 | mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); | 
|  | 1610 | if (was_empty) | 
|  | 1611 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); | 
|  | 1612 | } | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1613 | return seqno; | 
|  | 1614 | } | 
|  | 1615 |  | 
|  | 1616 | /** | 
|  | 1617 | * Command execution barrier | 
|  | 1618 | * | 
|  | 1619 | * Ensures that all commands in the ring are finished | 
|  | 1620 | * before signalling the CPU | 
|  | 1621 | */ | 
| Eric Anholt | 3043c60 | 2008-10-02 12:24:47 -0700 | [diff] [blame] | 1622 | static uint32_t | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1623 | i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1624 | { | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1625 | uint32_t flush_domains = 0; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1626 |  | 
|  | 1627 | /* The sampler always gets flushed on i965 (sigh) */ | 
|  | 1628 | if (IS_I965G(dev)) | 
|  | 1629 | flush_domains |= I915_GEM_DOMAIN_SAMPLER; | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1630 |  | 
|  | 1631 | ring->flush(dev, ring, | 
|  | 1632 | I915_GEM_DOMAIN_COMMAND, flush_domains); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1633 | return flush_domains; | 
|  | 1634 | } | 
|  | 1635 |  | 
|  | 1636 | /** | 
|  | 1637 | * Moves buffers associated only with the given active seqno from the active | 
|  | 1638 | * to inactive list, potentially freeing them. | 
|  | 1639 | */ | 
|  | 1640 | static void | 
|  | 1641 | i915_gem_retire_request(struct drm_device *dev, | 
|  | 1642 | struct drm_i915_gem_request *request) | 
|  | 1643 | { | 
|  | 1644 | drm_i915_private_t *dev_priv = dev->dev_private; | 
|  | 1645 |  | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 1646 | trace_i915_gem_request_retire(dev, request->seqno); | 
|  | 1647 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1648 | /* Move any buffers on the active list that are no longer referenced | 
|  | 1649 | * by the ringbuffer to the flushing/inactive lists as appropriate. | 
|  | 1650 | */ | 
| Carl Worth | 5e118f4 | 2009-03-20 11:54:25 -0700 | [diff] [blame] | 1651 | spin_lock(&dev_priv->mm.active_list_lock); | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1652 | while (!list_empty(&request->ring->active_list)) { | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1653 | struct drm_gem_object *obj; | 
|  | 1654 | struct drm_i915_gem_object *obj_priv; | 
|  | 1655 |  | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1656 | obj_priv = list_first_entry(&request->ring->active_list, | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1657 | struct drm_i915_gem_object, | 
|  | 1658 | list); | 
| Daniel Vetter | a8089e8 | 2010-04-09 19:05:09 +0000 | [diff] [blame] | 1659 | obj = &obj_priv->base; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1660 |  | 
|  | 1661 | /* If the seqno being retired doesn't match the oldest in the | 
|  | 1662 | * list, then the oldest in the list must still be newer than | 
|  | 1663 | * this seqno. | 
|  | 1664 | */ | 
|  | 1665 | if (obj_priv->last_rendering_seqno != request->seqno) | 
| Carl Worth | 5e118f4 | 2009-03-20 11:54:25 -0700 | [diff] [blame] | 1666 | goto out; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 1667 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1668 | #if WATCH_LRU | 
|  | 1669 | DRM_INFO("%s: retire %d moves to inactive list %p\n", | 
|  | 1670 | __func__, request->seqno, obj); | 
|  | 1671 | #endif | 
|  | 1672 |  | 
| Eric Anholt | ce44b0e | 2008-11-06 16:00:31 -0800 | [diff] [blame] | 1673 | if (obj->write_domain != 0) | 
|  | 1674 | i915_gem_object_move_to_flushing(obj); | 
| Shaohua Li | 68c8434 | 2009-04-08 10:58:23 +0800 | [diff] [blame] | 1675 | else { | 
|  | 1676 | /* Take a reference on the object so it won't be | 
|  | 1677 | * freed while the spinlock is held.  The list | 
|  | 1678 | * protection for this spinlock is safe when breaking | 
|  | 1679 | * the lock like this since the next thing we do | 
|  | 1680 | * is just get the head of the list again. | 
|  | 1681 | */ | 
|  | 1682 | drm_gem_object_reference(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1683 | i915_gem_object_move_to_inactive(obj); | 
| Shaohua Li | 68c8434 | 2009-04-08 10:58:23 +0800 | [diff] [blame] | 1684 | spin_unlock(&dev_priv->mm.active_list_lock); | 
|  | 1685 | drm_gem_object_unreference(obj); | 
|  | 1686 | spin_lock(&dev_priv->mm.active_list_lock); | 
|  | 1687 | } | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1688 | } | 
| Carl Worth | 5e118f4 | 2009-03-20 11:54:25 -0700 | [diff] [blame] | 1689 | out: | 
|  | 1690 | spin_unlock(&dev_priv->mm.active_list_lock); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1691 | } | 
|  | 1692 |  | 
|  | 1693 | /** | 
|  | 1694 | * Returns true if seq1 is at, or later than, seq2. | 
|  | 1695 | */ | 
| Ben Gamari | 22be172 | 2009-09-14 17:48:43 -0400 | [diff] [blame] | 1696 | bool | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1697 | i915_seqno_passed(uint32_t seq1, uint32_t seq2) | 
|  | 1698 | { | 
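|  |  | /* Signed subtraction keeps this correct across seqno wraparound: | 
|  |  | * e.g. seq1 = 2, seq2 = 0xfffffffe gives (int32_t)4 >= 0, so a | 
|  |  | * just-wrapped seq1 still counts as having passed seq2. | 
|  |  | */ | 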
|  | 1699 | return (int32_t)(seq1 - seq2) >= 0; | 
|  | 1700 | } | 
|  | 1701 |  | 
|  | 1702 | uint32_t | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1703 | i915_get_gem_seqno(struct drm_device *dev, | 
| Zou Nan hai | d1b851f | 2010-05-21 09:08:57 +0800 | [diff] [blame] | 1704 | struct intel_ring_buffer *ring) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1705 | { | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1706 | return ring->get_gem_seqno(dev, ring); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1707 | } | 
|  | 1708 |  | 
|  | 1709 | /** | 
|  | 1710 | * This function clears the request list as sequence numbers are passed. | 
|  | 1711 | */ | 
| Chris Wilson | b09a1fe | 2010-07-23 23:18:49 +0100 | [diff] [blame] | 1712 | static void | 
|  | 1713 | i915_gem_retire_requests_ring(struct drm_device *dev, | 
|  | 1714 | struct intel_ring_buffer *ring) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1715 | { | 
|  | 1716 | drm_i915_private_t *dev_priv = dev->dev_private; | 
|  | 1717 | uint32_t seqno; | 
|  | 1718 |  | 
| Zou Nan hai | 8187a2b | 2010-05-21 09:08:55 +0800 | [diff] [blame] | 1719 | if (!ring->status_page.page_addr | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1720 | || list_empty(&ring->request_list)) | 
| Karsten Wiese | 6c0594a | 2009-02-23 15:07:57 +0100 | [diff] [blame] | 1721 | return; | 
|  | 1722 |  | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1723 | seqno = i915_get_gem_seqno(dev, ring); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1724 |  | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1725 | while (!list_empty(&ring->request_list)) { | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1726 | struct drm_i915_gem_request *request; | 
|  | 1727 | uint32_t retiring_seqno; | 
|  | 1728 |  | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1729 | request = list_first_entry(&ring->request_list, | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1730 | struct drm_i915_gem_request, | 
|  | 1731 | list); | 
|  | 1732 | retiring_seqno = request->seqno; | 
|  | 1733 |  | 
|  | 1734 | if (i915_seqno_passed(seqno, retiring_seqno) || | 
| Ben Gamari | ba1234d | 2009-09-14 17:48:47 -0400 | [diff] [blame] | 1735 | atomic_read(&dev_priv->mm.wedged)) { | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1736 | i915_gem_retire_request(dev, request); | 
|  | 1737 |  | 
|  | 1738 | list_del(&request->list); | 
| Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 1739 | list_del(&request->client_list); | 
| Eric Anholt | 9a298b2 | 2009-03-24 12:23:04 -0700 | [diff] [blame] | 1740 | kfree(request); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1741 | } else | 
|  | 1742 | break; | 
|  | 1743 | } | 
| Chris Wilson | 9d34e5d | 2009-09-24 05:26:06 +0100 | [diff] [blame] | 1744 |  | 
|  | 1745 | if (unlikely(dev_priv->trace_irq_seqno && | 
|  | 1746 | i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) { | 
|  | 1748 | ring->user_irq_put(dev, ring); | 
| Chris Wilson | 9d34e5d | 2009-09-24 05:26:06 +0100 | [diff] [blame] | 1749 | dev_priv->trace_irq_seqno = 0; | 
|  | 1750 | } | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1751 | } | 
|  | 1752 |  | 
|  | 1753 | void | 
| Chris Wilson | b09a1fe | 2010-07-23 23:18:49 +0100 | [diff] [blame] | 1754 | i915_gem_retire_requests(struct drm_device *dev) | 
|  | 1755 | { | 
|  | 1756 | drm_i915_private_t *dev_priv = dev->dev_private; | 
|  | 1757 |  | 
| Chris Wilson | be72615 | 2010-07-23 23:18:50 +0100 | [diff] [blame] | 1758 | if (!list_empty(&dev_priv->mm.deferred_free_list)) { | 
|  | 1759 | struct drm_i915_gem_object *obj_priv, *tmp; | 
|  | 1760 |  | 
|  | 1761 | /* We must be careful that during unbind() we do not | 
|  | 1762 | * accidentally recurse infinitely into retire requests. | 
|  | 1763 | * Currently: | 
|  | 1764 | *   retire -> free -> unbind -> wait -> retire_ring | 
|  | 1765 | */ | 
|  | 1766 | list_for_each_entry_safe(obj_priv, tmp, | 
|  | 1767 | &dev_priv->mm.deferred_free_list, | 
|  | 1768 | list) | 
|  | 1769 | i915_gem_free_object_tail(&obj_priv->base); | 
|  | 1770 | } | 
|  | 1771 |  | 
| Chris Wilson | b09a1fe | 2010-07-23 23:18:49 +0100 | [diff] [blame] | 1772 | i915_gem_retire_requests_ring(dev, &dev_priv->render_ring); | 
|  | 1773 | if (HAS_BSD(dev)) | 
|  | 1774 | i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring); | 
|  | 1775 | } | 
|  | 1776 |  | 
|  | 1777 | void | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1778 | i915_gem_retire_work_handler(struct work_struct *work) | 
|  | 1779 | { | 
|  | 1780 | drm_i915_private_t *dev_priv; | 
|  | 1781 | struct drm_device *dev; | 
|  | 1782 |  | 
|  | 1783 | dev_priv = container_of(work, drm_i915_private_t, | 
|  | 1784 | mm.retire_work.work); | 
|  | 1785 | dev = dev_priv->dev; | 
|  | 1786 |  | 
|  | 1787 | mutex_lock(&dev->struct_mutex); | 
| Chris Wilson | b09a1fe | 2010-07-23 23:18:49 +0100 | [diff] [blame] | 1788 | i915_gem_retire_requests(dev); | 
| Zou Nan hai | d1b851f | 2010-05-21 09:08:57 +0800 | [diff] [blame] | 1789 |  | 
| Keith Packard | 6dbe277 | 2008-10-14 21:41:13 -0700 | [diff] [blame] | 1790 | if (!dev_priv->mm.suspended && | 
| Zou Nan hai | d1b851f | 2010-05-21 09:08:57 +0800 | [diff] [blame] | 1791 | (!list_empty(&dev_priv->render_ring.request_list) || | 
|  | 1792 | (HAS_BSD(dev) && | 
|  | 1793 | !list_empty(&dev_priv->bsd_ring.request_list)))) | 
| Eric Anholt | 9c9fe1f | 2009-08-03 16:09:16 -0700 | [diff] [blame] | 1794 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1795 | mutex_unlock(&dev->struct_mutex); | 
|  | 1796 | } | 
|  | 1797 |  | 
| Daniel Vetter | 5a5a0c6 | 2009-09-15 22:57:36 +0200 | [diff] [blame] | 1798 | int | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1799 | i915_do_wait_request(struct drm_device *dev, uint32_t seqno, | 
|  | 1800 | int interruptible, struct intel_ring_buffer *ring) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1801 | { | 
|  | 1802 | drm_i915_private_t *dev_priv = dev->dev_private; | 
| Jesse Barnes | 802c7eb | 2009-05-05 16:03:48 -0700 | [diff] [blame] | 1803 | u32 ier; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1804 | int ret = 0; | 
|  | 1805 |  | 
|  | 1806 | BUG_ON(seqno == 0); | 
|  | 1807 |  | 
| Ben Gamari | ba1234d | 2009-09-14 17:48:47 -0400 | [diff] [blame] | 1808 | if (atomic_read(&dev_priv->mm.wedged)) | 
| Ben Gamari | ffed1d0 | 2009-09-14 17:48:41 -0400 | [diff] [blame] | 1809 | return -EIO; | 
|  | 1810 |  | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1811 | if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) { | 
| Eric Anholt | bad720f | 2009-10-22 16:11:14 -0700 | [diff] [blame] | 1812 | if (HAS_PCH_SPLIT(dev)) | 
| Zhenyu Wang | 036a4a7 | 2009-06-08 14:40:19 +0800 | [diff] [blame] | 1813 | ier = I915_READ(DEIER) | I915_READ(GTIER); | 
|  | 1814 | else | 
|  | 1815 | ier = I915_READ(IER); | 
| Jesse Barnes | 802c7eb | 2009-05-05 16:03:48 -0700 | [diff] [blame] | 1816 | if (!ier) { | 
|  | 1817 | DRM_ERROR("something (likely vbetool) disabled " | 
|  | 1818 | "interrupts, re-enabling\n"); | 
|  | 1819 | i915_driver_irq_preinstall(dev); | 
|  | 1820 | i915_driver_irq_postinstall(dev); | 
|  | 1821 | } | 
|  | 1822 |  | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 1823 | trace_i915_gem_request_wait_begin(dev, seqno); | 
|  | 1824 |  | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1825 | ring->waiting_gem_seqno = seqno; | 
| Zou Nan hai | 8187a2b | 2010-05-21 09:08:55 +0800 | [diff] [blame] | 1826 | ring->user_irq_get(dev, ring); | 
| Daniel Vetter | 48764bf | 2009-09-15 22:57:32 +0200 | [diff] [blame] | 1827 | if (interruptible) | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1828 | ret = wait_event_interruptible(ring->irq_queue, | 
|  | 1829 | i915_seqno_passed( | 
|  | 1830 | ring->get_gem_seqno(dev, ring), seqno) | 
|  | 1831 | || atomic_read(&dev_priv->mm.wedged)); | 
| Daniel Vetter | 48764bf | 2009-09-15 22:57:32 +0200 | [diff] [blame] | 1832 | else | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1833 | wait_event(ring->irq_queue, | 
|  | 1834 | i915_seqno_passed( | 
|  | 1835 | ring->get_gem_seqno(dev, ring), seqno) | 
|  | 1836 | || atomic_read(&dev_priv->mm.wedged)); | 
| Daniel Vetter | 48764bf | 2009-09-15 22:57:32 +0200 | [diff] [blame] | 1837 |  | 
| Zou Nan hai | 8187a2b | 2010-05-21 09:08:55 +0800 | [diff] [blame] | 1838 | ring->user_irq_put(dev, ring); | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1839 | ring->waiting_gem_seqno = 0; | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 1840 |  | 
|  | 1841 | trace_i915_gem_request_wait_end(dev, seqno); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1842 | } | 
| Ben Gamari | ba1234d | 2009-09-14 17:48:47 -0400 | [diff] [blame] | 1843 | if (atomic_read(&dev_priv->mm.wedged)) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1844 | ret = -EIO; | 
|  | 1845 |  | 
|  | 1846 | if (ret && ret != -ERESTARTSYS) | 
|  | 1847 | DRM_ERROR("%s returns %d (awaiting %d at %d)\n", | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1848 | __func__, ret, seqno, ring->get_gem_seqno(dev, ring)); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1849 |  | 
|  | 1850 | /* Directly dispatch request retiring.  While we have the work queue | 
|  | 1851 | * to handle this, the waiter on a request often wants an associated | 
|  | 1852 | * buffer to have made it to the inactive list, and we would need | 
|  | 1853 | * a separate wait queue to handle that. | 
|  | 1854 | */ | 
|  | 1855 | if (ret == 0) | 
| Chris Wilson | b09a1fe | 2010-07-23 23:18:49 +0100 | [diff] [blame] | 1856 | i915_gem_retire_requests_ring(dev, ring); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1857 |  | 
|  | 1858 | return ret; | 
|  | 1859 | } | 
|  | 1860 |  | 
| Daniel Vetter | 48764bf | 2009-09-15 22:57:32 +0200 | [diff] [blame] | 1861 | /** | 
|  | 1862 | * Waits for a sequence number to be signaled, and cleans up the | 
|  | 1863 | * request and object lists appropriately for that event. | 
|  | 1864 | */ | 
|  | 1865 | static int | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1866 | i915_wait_request(struct drm_device *dev, uint32_t seqno, | 
|  | 1867 | struct intel_ring_buffer *ring) | 
| Daniel Vetter | 48764bf | 2009-09-15 22:57:32 +0200 | [diff] [blame] | 1868 | { | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1869 | return i915_do_wait_request(dev, seqno, 1, ring); | 
| Daniel Vetter | 48764bf | 2009-09-15 22:57:32 +0200 | [diff] [blame] | 1870 | } | 
|  | 1871 |  | 
| Zou Nan hai | 8187a2b | 2010-05-21 09:08:55 +0800 | [diff] [blame] | 1872 | static void | 
|  | 1873 | i915_gem_flush(struct drm_device *dev, | 
|  | 1874 | uint32_t invalidate_domains, | 
|  | 1875 | uint32_t flush_domains) | 
|  | 1876 | { | 
|  | 1877 | drm_i915_private_t *dev_priv = dev->dev_private; | 
|  | 1878 | if (flush_domains & I915_GEM_DOMAIN_CPU) | 
|  | 1879 | drm_agp_chipset_flush(dev); | 
|  | 1880 | dev_priv->render_ring.flush(dev, &dev_priv->render_ring, | 
|  | 1881 | invalidate_domains, | 
|  | 1882 | flush_domains); | 
| Zou Nan hai | d1b851f | 2010-05-21 09:08:57 +0800 | [diff] [blame] | 1883 |  | 
|  | 1884 | if (HAS_BSD(dev)) | 
|  | 1885 | dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring, | 
|  | 1886 | invalidate_domains, | 
|  | 1887 | flush_domains); | 
| Zou Nan hai | 8187a2b | 2010-05-21 09:08:55 +0800 | [diff] [blame] | 1888 | } | 
|  | 1889 |  | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1890 | static void | 
|  | 1891 | i915_gem_flush_ring(struct drm_device *dev, | 
|  | 1892 | uint32_t invalidate_domains, | 
|  | 1893 | uint32_t flush_domains, | 
|  | 1894 | struct intel_ring_buffer *ring) | 
|  | 1895 | { | 
|  | 1896 | if (flush_domains & I915_GEM_DOMAIN_CPU) | 
|  | 1897 | drm_agp_chipset_flush(dev); | 
|  | 1898 | ring->flush(dev, ring, | 
|  | 1899 | invalidate_domains, | 
|  | 1900 | flush_domains); | 
|  | 1901 | } | 
|  | 1902 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1903 | /** | 
|  | 1904 | * Ensures that all rendering to the object has completed and the object is | 
|  | 1905 | * safe to unbind from the GTT or access from the CPU. | 
|  | 1906 | */ | 
|  | 1907 | static int | 
|  | 1908 | i915_gem_object_wait_rendering(struct drm_gem_object *obj) | 
|  | 1909 | { | 
|  | 1910 | struct drm_device *dev = obj->dev; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 1911 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1912 | int ret; | 
|  | 1913 |  | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 1914 | /* This function only exists to support waiting for existing rendering, | 
|  | 1915 | * not for emitting required flushes. | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1916 | */ | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 1917 | BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1918 |  | 
|  | 1919 | /* If there is rendering queued on the buffer being evicted, wait for | 
|  | 1920 | * it. | 
|  | 1921 | */ | 
|  | 1922 | if (obj_priv->active) { | 
|  | 1923 | #if WATCH_BUF | 
|  | 1924 | DRM_INFO("%s: object %p wait for seqno %08x\n", | 
|  | 1925 | __func__, obj, obj_priv->last_rendering_seqno); | 
|  | 1926 | #endif | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 1927 | ret = i915_wait_request(dev, | 
|  | 1928 | obj_priv->last_rendering_seqno, obj_priv->ring); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1929 | if (ret != 0) | 
|  | 1930 | return ret; | 
|  | 1931 | } | 
|  | 1932 |  | 
|  | 1933 | return 0; | 
|  | 1934 | } | 
|  | 1935 |  | 
|  | 1936 | /** | 
|  | 1937 | * Unbinds an object from the GTT aperture. | 
|  | 1938 | */ | 
| Jesse Barnes | 0f973f2 | 2009-01-26 17:10:45 -0800 | [diff] [blame] | 1939 | int | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1940 | i915_gem_object_unbind(struct drm_gem_object *obj) | 
|  | 1941 | { | 
|  | 1942 | struct drm_device *dev = obj->dev; | 
| Daniel Vetter | 4a87b8c | 2010-02-19 11:51:57 +0100 | [diff] [blame] | 1943 | drm_i915_private_t *dev_priv = dev->dev_private; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 1944 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1945 | int ret = 0; | 
|  | 1946 |  | 
|  | 1947 | #if WATCH_BUF | 
|  | 1948 | DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj); | 
|  | 1949 | DRM_INFO("gtt_space %p\n", obj_priv->gtt_space); | 
|  | 1950 | #endif | 
|  | 1951 | if (obj_priv->gtt_space == NULL) | 
|  | 1952 | return 0; | 
|  | 1953 |  | 
|  | 1954 | if (obj_priv->pin_count != 0) { | 
|  | 1955 | DRM_ERROR("Attempting to unbind pinned buffer\n"); | 
|  | 1956 | return -EINVAL; | 
|  | 1957 | } | 
|  | 1958 |  | 
| Eric Anholt | 5323fd0 | 2009-09-09 11:50:45 -0700 | [diff] [blame] | 1959 | /* blow away mappings if mapped through GTT */ | 
|  | 1960 | i915_gem_release_mmap(obj); | 
|  | 1961 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1962 | /* Move the object to the CPU domain to ensure that | 
|  | 1963 | * any possible CPU writes while it's not in the GTT | 
|  | 1964 | * are flushed when we go to remap it. This will | 
|  | 1965 | * also ensure that all pending GPU writes are finished | 
|  | 1966 | * before we unbind. | 
|  | 1967 | */ | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 1968 | ret = i915_gem_object_set_to_cpu_domain(obj, 1); | 
| Chris Wilson | 8dc1775 | 2010-07-23 23:18:51 +0100 | [diff] [blame] | 1969 | if (ret == -ERESTARTSYS) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1970 | return ret; | 
| Chris Wilson | 8dc1775 | 2010-07-23 23:18:51 +0100 | [diff] [blame] | 1971 | /* Continue on if we fail due to EIO; the GPU is hung, so we | 
|  | 1972 | * should be safe, and we need to clean up or else we might | 
|  | 1973 | * cause memory corruption through use-after-free. | 
|  | 1974 | */ | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1975 |  | 
| Eric Anholt | 5323fd0 | 2009-09-09 11:50:45 -0700 | [diff] [blame] | 1976 | BUG_ON(obj_priv->active); | 
|  | 1977 |  | 
| Daniel Vetter | 96b47b6 | 2009-12-15 17:50:00 +0100 | [diff] [blame] | 1978 | /* release the fence reg _after_ flushing */ | 
|  | 1979 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) | 
|  | 1980 | i915_gem_clear_fence_reg(obj); | 
|  | 1981 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1982 | if (obj_priv->agp_mem != NULL) { | 
|  | 1983 | drm_unbind_agp(obj_priv->agp_mem); | 
|  | 1984 | drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); | 
|  | 1985 | obj_priv->agp_mem = NULL; | 
|  | 1986 | } | 
|  | 1987 |  | 
| Eric Anholt | 856fa19 | 2009-03-19 14:10:50 -0700 | [diff] [blame] | 1988 | i915_gem_object_put_pages(obj); | 
| Chris Wilson | a32808c | 2009-09-20 21:29:47 +0100 | [diff] [blame] | 1989 | BUG_ON(obj_priv->pages_refcount); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 1990 |  | 
|  | 1991 | if (obj_priv->gtt_space) { | 
|  | 1992 | atomic_dec(&dev->gtt_count); | 
|  | 1993 | atomic_sub(obj->size, &dev->gtt_memory); | 
|  | 1994 |  | 
|  | 1995 | drm_mm_put_block(obj_priv->gtt_space); | 
|  | 1996 | obj_priv->gtt_space = NULL; | 
|  | 1997 | } | 
|  | 1998 |  | 
|  | 1999 | /* Remove ourselves from the LRU list if present. */ | 
| Daniel Vetter | 4a87b8c | 2010-02-19 11:51:57 +0100 | [diff] [blame] | 2000 | spin_lock(&dev_priv->mm.active_list_lock); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2001 | if (!list_empty(&obj_priv->list)) | 
|  | 2002 | list_del_init(&obj_priv->list); | 
| Daniel Vetter | 4a87b8c | 2010-02-19 11:51:57 +0100 | [diff] [blame] | 2003 | spin_unlock(&dev_priv->mm.active_list_lock); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2004 |  | 
| Chris Wilson | 963b483 | 2009-09-20 23:03:54 +0100 | [diff] [blame] | 2005 | if (i915_gem_object_is_purgeable(obj_priv)) | 
|  | 2006 | i915_gem_object_truncate(obj); | 
|  | 2007 |  | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 2008 | trace_i915_gem_object_unbind(obj); | 
|  | 2009 |  | 
| Chris Wilson | 8dc1775 | 2010-07-23 23:18:51 +0100 | [diff] [blame] | 2010 | return ret; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2011 | } | 
|  | 2012 |  | 
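/*
 * Example (sketch): unbind is only legal on unpinned objects and may
 * be interrupted by a signal, so a typical caller looks like:
 *
 *	i915_gem_object_unpin(obj);
 *	ret = i915_gem_object_unbind(obj);
 *	if (ret == -ERESTARTSYS)
 *		return ret;
 */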
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2013 | static struct drm_gem_object * | 
|  | 2014 | i915_gem_find_inactive_object(struct drm_device *dev, int min_size) | 
|  | 2015 | { | 
|  | 2016 | drm_i915_private_t *dev_priv = dev->dev_private; | 
|  | 2017 | struct drm_i915_gem_object *obj_priv; | 
|  | 2018 | struct drm_gem_object *best = NULL; | 
|  | 2019 | struct drm_gem_object *first = NULL; | 
|  | 2020 |  | 
|  | 2021 | /* Try to find the smallest clean object */ | 
|  | 2022 | list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { | 
| Daniel Vetter | a8089e8 | 2010-04-09 19:05:09 +0000 | [diff] [blame] | 2023 | struct drm_gem_object *obj = &obj_priv->base; | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2024 | if (obj->size >= min_size) { | 
| Chris Wilson | 963b483 | 2009-09-20 23:03:54 +0100 | [diff] [blame] | 2025 | if ((!obj_priv->dirty || | 
|  | 2026 | i915_gem_object_is_purgeable(obj_priv)) && | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2027 | (!best || obj->size < best->size)) { | 
|  | 2028 | best = obj; | 
|  | 2029 | if (best->size == min_size) | 
|  | 2030 | return best; | 
|  | 2031 | } | 
|  | 2032 | if (!first) | 
|  | 2033 | first = obj; | 
|  | 2034 | } | 
|  | 2035 | } | 
|  | 2036 |  | 
|  | 2037 | return best ? best : first; | 
|  | 2038 | } | 
|  | 2039 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2040 | static int | 
| Daniel Vetter | 4df2faf | 2010-02-19 11:52:00 +0100 | [diff] [blame] | 2041 | i915_gpu_idle(struct drm_device *dev) | 
|  | 2042 | { | 
|  | 2043 | drm_i915_private_t *dev_priv = dev->dev_private; | 
|  | 2044 | bool lists_empty; | 
| Zou Nan hai | d1b851f | 2010-05-21 09:08:57 +0800 | [diff] [blame] | 2045 | uint32_t seqno1, seqno2; | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 2046 | int ret; | 
| Daniel Vetter | 4df2faf | 2010-02-19 11:52:00 +0100 | [diff] [blame] | 2047 |  | 
|  | 2048 | spin_lock(&dev_priv->mm.active_list_lock); | 
| Zou Nan hai | d1b851f | 2010-05-21 09:08:57 +0800 | [diff] [blame] | 2049 | lists_empty = (list_empty(&dev_priv->mm.flushing_list) && | 
|  | 2050 | list_empty(&dev_priv->render_ring.active_list) && | 
|  | 2051 | (!HAS_BSD(dev) || | 
|  | 2052 | list_empty(&dev_priv->bsd_ring.active_list))); | 
| Daniel Vetter | 4df2faf | 2010-02-19 11:52:00 +0100 | [diff] [blame] | 2053 | spin_unlock(&dev_priv->mm.active_list_lock); | 
|  | 2054 |  | 
|  | 2055 | if (lists_empty) | 
|  | 2056 | return 0; | 
|  | 2057 |  | 
|  | 2058 | /* Flush everything onto the inactive list. */ | 
|  | 2059 | i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); | 
| Zou Nan hai | d1b851f | 2010-05-21 09:08:57 +0800 | [diff] [blame] | 2060 | seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS, | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 2061 | &dev_priv->render_ring); | 
| Zou Nan hai | d1b851f | 2010-05-21 09:08:57 +0800 | [diff] [blame] | 2062 | if (seqno1 == 0) | 
| Daniel Vetter | 4df2faf | 2010-02-19 11:52:00 +0100 | [diff] [blame] | 2063 | return -ENOMEM; | 
| Zou Nan hai | d1b851f | 2010-05-21 09:08:57 +0800 | [diff] [blame] | 2064 | ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring); | 
|  |  | if (ret) | 
|  |  | return ret; | 
|  | 2065 |  | 
|  | 2066 | if (HAS_BSD(dev)) { | 
|  | 2067 | seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS, | 
|  | 2068 | &dev_priv->bsd_ring); | 
|  | 2069 | if (seqno2 == 0) | 
|  | 2070 | return -ENOMEM; | 
|  | 2071 |  | 
|  | 2072 | ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring); | 
|  | 2073 | if (ret) | 
|  | 2074 | return ret; | 
|  | 2075 | } | 
| Daniel Vetter | 4df2faf | 2010-02-19 11:52:00 +0100 | [diff] [blame] | 2077 |  | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 2078 | return ret; | 
| Daniel Vetter | 4df2faf | 2010-02-19 11:52:00 +0100 | [diff] [blame] | 2079 | } | 
|  | 2080 |  | 
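/*
 * Example (sketch): the flush/request/wait idiom above, reduced to a
 * single ring; i915_add_request() returning a seqno of 0 means the
 * request could not be allocated:
 *
 *	i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 *	seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS, ring);
 *	if (seqno == 0)
 *		return -ENOMEM;
 *	ret = i915_wait_request(dev, seqno, ring);
 */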
|  | 2081 | static int | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2082 | i915_gem_evict_everything(struct drm_device *dev) | 
|  | 2083 | { | 
|  | 2084 | drm_i915_private_t *dev_priv = dev->dev_private; | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2085 | int ret; | 
|  | 2086 | bool lists_empty; | 
|  | 2087 |  | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2088 | spin_lock(&dev_priv->mm.active_list_lock); | 
|  | 2089 | lists_empty = (list_empty(&dev_priv->mm.inactive_list) && | 
|  | 2090 | list_empty(&dev_priv->mm.flushing_list) && | 
| Zou Nan hai | d1b851f | 2010-05-21 09:08:57 +0800 | [diff] [blame] | 2091 | list_empty(&dev_priv->render_ring.active_list) && | 
|  | 2092 | (!HAS_BSD(dev) | 
|  | 2093 | || list_empty(&dev_priv->bsd_ring.active_list))); | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2094 | spin_unlock(&dev_priv->mm.active_list_lock); | 
|  | 2095 |  | 
| Chris Wilson | 9731129 | 2009-09-21 00:22:34 +0100 | [diff] [blame] | 2096 | if (lists_empty) | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2097 | return -ENOSPC; | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2098 |  | 
|  | 2099 | /* Flush everything (on to the inactive lists) and evict */ | 
| Daniel Vetter | 4df2faf | 2010-02-19 11:52:00 +0100 | [diff] [blame] | 2100 | ret = i915_gpu_idle(dev); | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2101 | if (ret) | 
|  | 2102 | return ret; | 
|  | 2103 |  | 
| Daniel Vetter | 99fcb76 | 2010-02-07 16:20:18 +0100 | [diff] [blame] | 2104 | BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); | 
|  | 2105 |  | 
| Chris Wilson | ab5ee57 | 2009-09-20 19:25:47 +0100 | [diff] [blame] | 2106 | ret = i915_gem_evict_from_inactive_list(dev); | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2107 | if (ret) | 
|  | 2108 | return ret; | 
|  | 2109 |  | 
|  | 2110 | spin_lock(&dev_priv->mm.active_list_lock); | 
|  | 2111 | lists_empty = (list_empty(&dev_priv->mm.inactive_list) && | 
|  | 2112 | list_empty(&dev_priv->mm.flushing_list) && | 
| Zou Nan hai | d1b851f | 2010-05-21 09:08:57 +0800 | [diff] [blame] | 2113 | list_empty(&dev_priv->render_ring.active_list) && | 
|  | 2114 | (!HAS_BSD(dev) | 
|  | 2115 | || list_empty(&dev_priv->bsd_ring.active_list))); | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2116 | spin_unlock(&dev_priv->mm.active_list_lock); | 
|  | 2117 | BUG_ON(!lists_empty); | 
|  | 2118 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2119 | return 0; | 
|  | 2120 | } | 
|  | 2121 |  | 
|  | 2122 | static int | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2123 | i915_gem_evict_something(struct drm_device *dev, int min_size) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2124 | { | 
|  | 2125 | drm_i915_private_t *dev_priv = dev->dev_private; | 
|  | 2126 | struct drm_gem_object *obj; | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2127 | int ret; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2128 |  | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 2129 | struct intel_ring_buffer *render_ring = &dev_priv->render_ring; | 
| Zou Nan hai | d1b851f | 2010-05-21 09:08:57 +0800 | [diff] [blame] | 2130 | struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2131 | for (;;) { | 
| Chris Wilson | b09a1fe | 2010-07-23 23:18:49 +0100 | [diff] [blame] | 2132 | i915_gem_retire_requests(dev); | 
| Zou Nan hai | d1b851f | 2010-05-21 09:08:57 +0800 | [diff] [blame] | 2133 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2134 | /* If there's an inactive buffer available now, grab it | 
|  | 2135 | * and be done. | 
|  | 2136 | */ | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2137 | obj = i915_gem_find_inactive_object(dev, min_size); | 
|  | 2138 | if (obj) { | 
|  | 2139 | struct drm_i915_gem_object *obj_priv; | 
|  | 2140 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2141 | #if WATCH_LRU | 
|  | 2142 | DRM_INFO("%s: evicting %p\n", __func__, obj); | 
|  | 2143 | #endif | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 2144 | obj_priv = to_intel_bo(obj); | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2145 | BUG_ON(obj_priv->pin_count != 0); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2146 | BUG_ON(obj_priv->active); | 
|  | 2147 |  | 
|  | 2148 | /* Wait on the rendering and unbind the buffer. */ | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2149 | return i915_gem_object_unbind(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2150 | } | 
|  | 2151 |  | 
|  | 2152 | /* If we didn't get anything, but the ring is still processing | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2153 | * requests, wait for the next one to finish and hopefully leave us | 
|  | 2154 | * a buffer to evict. | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2155 | */ | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 2156 | if (!list_empty(&render_ring->request_list)) { | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2157 | struct drm_i915_gem_request *request; | 
|  | 2158 |  | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 2159 | request = list_first_entry(&render_ring->request_list, | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2160 | struct drm_i915_gem_request, | 
|  | 2161 | list); | 
|  | 2162 |  | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 2163 | ret = i915_wait_request(dev, | 
|  | 2164 | request->seqno, request->ring); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2165 | if (ret) | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2166 | return ret; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2167 |  | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2168 | continue; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2169 | } | 
|  | 2170 |  | 
| Zou Nan hai | d1b851f | 2010-05-21 09:08:57 +0800 | [diff] [blame] | 2171 | if (HAS_BSD(dev) && !list_empty(&bsd_ring->request_list)) { | 
|  | 2172 | struct drm_i915_gem_request *request; | 
|  | 2173 |  | 
|  | 2174 | request = list_first_entry(&bsd_ring->request_list, | 
|  | 2175 | struct drm_i915_gem_request, | 
|  | 2176 | list); | 
|  | 2177 |  | 
|  | 2178 | ret = i915_wait_request(dev, | 
|  | 2179 | request->seqno, request->ring); | 
|  | 2180 | if (ret) | 
|  | 2181 | return ret; | 
|  | 2182 |  | 
|  | 2183 | continue; | 
|  | 2184 | } | 
|  | 2185 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2186 | /* If we didn't have anything on the request list but there | 
|  | 2187 | * are buffers awaiting a flush, emit one and try again. | 
|  | 2188 | * When we wait on it, those buffers waiting for that flush | 
|  | 2189 | * will get moved to inactive. | 
|  | 2190 | */ | 
|  | 2191 | if (!list_empty(&dev_priv->mm.flushing_list)) { | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2192 | struct drm_i915_gem_object *obj_priv; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2193 |  | 
| Chris Wilson | 9a1e258 | 2009-09-20 20:16:50 +0100 | [diff] [blame] | 2194 | /* Find an object that we can immediately reuse */ | 
|  | 2195 | list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) { | 
| Daniel Vetter | a8089e8 | 2010-04-09 19:05:09 +0000 | [diff] [blame] | 2196 | obj = &obj_priv->base; | 
| Chris Wilson | 9a1e258 | 2009-09-20 20:16:50 +0100 | [diff] [blame] | 2197 | if (obj->size >= min_size) | 
|  | 2198 | break; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2199 |  | 
| Chris Wilson | 9a1e258 | 2009-09-20 20:16:50 +0100 | [diff] [blame] | 2200 | obj = NULL; | 
|  | 2201 | } | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2202 |  | 
| Chris Wilson | 9a1e258 | 2009-09-20 20:16:50 +0100 | [diff] [blame] | 2203 | if (obj != NULL) { | 
|  | 2204 | uint32_t seqno; | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2205 |  | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 2206 | i915_gem_flush_ring(dev, | 
| Chris Wilson | 9a1e258 | 2009-09-20 20:16:50 +0100 | [diff] [blame] | 2207 | obj->write_domain, | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 2208 | obj->write_domain, | 
|  | 2209 | obj_priv->ring); | 
|  | 2210 | seqno = i915_add_request(dev, NULL, | 
|  | 2211 | obj->write_domain, | 
|  | 2212 | obj_priv->ring); | 
| Chris Wilson | 9a1e258 | 2009-09-20 20:16:50 +0100 | [diff] [blame] | 2213 | if (seqno == 0) | 
|  | 2214 | return -ENOMEM; | 
| Chris Wilson | 9a1e258 | 2009-09-20 20:16:50 +0100 | [diff] [blame] | 2215 | continue; | 
|  | 2216 | } | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2217 | } | 
|  | 2218 |  | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2219 | /* If we didn't do any of the above, there's no single buffer | 
|  | 2220 | * large enough to swap out for the new one, so just evict | 
|  | 2221 | * everything and start again. (This should be rare.) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2222 | */ | 
| Chris Wilson | 9731129 | 2009-09-21 00:22:34 +0100 | [diff] [blame] | 2223 | if (!list_empty(&dev_priv->mm.inactive_list)) | 
| Chris Wilson | ab5ee57 | 2009-09-20 19:25:47 +0100 | [diff] [blame] | 2224 | return i915_gem_evict_from_inactive_list(dev); | 
| Chris Wilson | 9731129 | 2009-09-21 00:22:34 +0100 | [diff] [blame] | 2225 | else | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2226 | return i915_gem_evict_everything(dev); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2227 | } | 
| Keith Packard | ac94a96 | 2008-11-20 23:30:27 -0800 | [diff] [blame] | 2228 | } | 
|  | 2229 |  | 
| Ben Gamari | 6911a9b | 2009-04-02 11:24:54 -0700 | [diff] [blame] | 2230 | int | 
| Chris Wilson | 4bdadb9 | 2010-01-27 13:36:32 +0000 | [diff] [blame] | 2231 | i915_gem_object_get_pages(struct drm_gem_object *obj, | 
|  | 2232 | gfp_t gfpmask) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2233 | { | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 2234 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2235 | int page_count, i; | 
|  | 2236 | struct address_space *mapping; | 
|  | 2237 | struct inode *inode; | 
|  | 2238 | struct page *page; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2239 |  | 
| Daniel Vetter | 778c354 | 2010-05-13 11:49:44 +0200 | [diff] [blame] | 2240 | BUG_ON(obj_priv->pages_refcount | 
|  | 2241 | == DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT); | 
|  | 2242 |  | 
| Eric Anholt | 856fa19 | 2009-03-19 14:10:50 -0700 | [diff] [blame] | 2243 | if (obj_priv->pages_refcount++ != 0) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2244 | return 0; | 
|  | 2245 |  | 
|  | 2246 | /* Get the list of pages out of our struct file.  They'll be pinned | 
|  | 2247 | * at this point until we release them. | 
|  | 2248 | */ | 
|  | 2249 | page_count = obj->size / PAGE_SIZE; | 
| Eric Anholt | 856fa19 | 2009-03-19 14:10:50 -0700 | [diff] [blame] | 2250 | BUG_ON(obj_priv->pages != NULL); | 
| Jesse Barnes | 8e7d2b2 | 2009-05-08 16:13:25 -0700 | [diff] [blame] | 2251 | obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *)); | 
| Eric Anholt | 856fa19 | 2009-03-19 14:10:50 -0700 | [diff] [blame] | 2252 | if (obj_priv->pages == NULL) { | 
| Eric Anholt | 856fa19 | 2009-03-19 14:10:50 -0700 | [diff] [blame] | 2253 | obj_priv->pages_refcount--; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2254 | return -ENOMEM; | 
|  | 2255 | } | 
|  | 2256 |  | 
|  | 2257 | inode = obj->filp->f_path.dentry->d_inode; | 
|  | 2258 | mapping = inode->i_mapping; | 
|  | 2259 | for (i = 0; i < page_count; i++) { | 
| Chris Wilson | 4bdadb9 | 2010-01-27 13:36:32 +0000 | [diff] [blame] | 2260 | page = read_cache_page_gfp(mapping, i, | 
| Linus Torvalds | 985b823 | 2010-07-02 10:04:42 +1000 | [diff] [blame] | 2261 | GFP_HIGHUSER | | 
| Chris Wilson | 4bdadb9 | 2010-01-27 13:36:32 +0000 | [diff] [blame] | 2262 | __GFP_COLD | | 
| Linus Torvalds | cd9f040 | 2010-07-18 09:44:37 -0700 | [diff] [blame] | 2263 | __GFP_RECLAIMABLE | | 
| Chris Wilson | 4bdadb9 | 2010-01-27 13:36:32 +0000 | [diff] [blame] | 2264 | gfpmask); | 
| Chris Wilson | 1f2b101 | 2010-03-12 19:52:55 +0000 | [diff] [blame] | 2265 | if (IS_ERR(page)) | 
|  | 2266 | goto err_pages; | 
|  | 2267 |  | 
| Eric Anholt | 856fa19 | 2009-03-19 14:10:50 -0700 | [diff] [blame] | 2268 | obj_priv->pages[i] = page; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2269 | } | 
| Eric Anholt | 280b713 | 2009-03-12 16:56:27 -0700 | [diff] [blame] | 2270 |  | 
|  | 2271 | if (obj_priv->tiling_mode != I915_TILING_NONE) | 
|  | 2272 | i915_gem_object_do_bit_17_swizzle(obj); | 
|  | 2273 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2274 | return 0; | 
| Chris Wilson | 1f2b101 | 2010-03-12 19:52:55 +0000 | [diff] [blame] | 2275 |  | 
|  | 2276 | err_pages: | 
|  | 2277 | while (i--) | 
|  | 2278 | page_cache_release(obj_priv->pages[i]); | 
|  | 2279 |  | 
|  | 2280 | drm_free_large(obj_priv->pages); | 
|  | 2281 | obj_priv->pages = NULL; | 
|  | 2282 | obj_priv->pages_refcount--; | 
|  | 2283 | return PTR_ERR(page); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2284 | } | 
|  | 2285 |  | 
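/*
 * Example (sketch): page references are counted, so every successful
 * get must be balanced by a put once the pages no longer need to be
 * pinned:
 *
 *	ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
 *	if (ret)
 *		return ret;
 *	... access obj_priv->pages[i] ...
 *	i915_gem_object_put_pages(obj);
 */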
| Eric Anholt | 4e901fd | 2009-10-26 16:44:17 -0700 | [diff] [blame] | 2286 | static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg) | 
|  | 2287 | { | 
|  | 2288 | struct drm_gem_object *obj = reg->obj; | 
|  | 2289 | struct drm_device *dev = obj->dev; | 
|  | 2290 | drm_i915_private_t *dev_priv = dev->dev_private; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 2291 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Eric Anholt | 4e901fd | 2009-10-26 16:44:17 -0700 | [diff] [blame] | 2292 | int regnum = obj_priv->fence_reg; | 
|  | 2293 | uint64_t val; | 
|  | 2294 |  | 
|  | 2295 | val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) & | 
|  | 2296 | 0xfffff000) << 32; | 
|  | 2297 | val |= obj_priv->gtt_offset & 0xfffff000; | 
|  | 2298 | val |= (uint64_t)((obj_priv->stride / 128) - 1) << | 
|  | 2299 | SANDYBRIDGE_FENCE_PITCH_SHIFT; | 
|  | 2300 |  | 
|  | 2301 | if (obj_priv->tiling_mode == I915_TILING_Y) | 
|  | 2302 | val |= 1 << I965_FENCE_TILING_Y_SHIFT; | 
|  | 2303 | val |= I965_FENCE_REG_VALID; | 
|  | 2304 |  | 
|  | 2305 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val); | 
|  | 2306 | } | 
|  | 2307 |  | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2308 | static void i965_write_fence_reg(struct drm_i915_fence_reg *reg) | 
|  | 2309 | { | 
|  | 2310 | struct drm_gem_object *obj = reg->obj; | 
|  | 2311 | struct drm_device *dev = obj->dev; | 
|  | 2312 | drm_i915_private_t *dev_priv = dev->dev_private; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 2313 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2314 | int regnum = obj_priv->fence_reg; | 
|  | 2315 | uint64_t val; | 
|  | 2316 |  | 
|  | 2317 | val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) & | 
|  | 2318 | 0xfffff000) << 32; | 
|  | 2319 | val |= obj_priv->gtt_offset & 0xfffff000; | 
|  | 2320 | val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT; | 
|  | 2321 | if (obj_priv->tiling_mode == I915_TILING_Y) | 
|  | 2322 | val |= 1 << I965_FENCE_TILING_Y_SHIFT; | 
|  | 2323 | val |= I965_FENCE_REG_VALID; | 
|  | 2324 |  | 
|  | 2325 | I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val); | 
|  | 2326 | } | 
|  | 2327 |  | 
|  | 2328 | static void i915_write_fence_reg(struct drm_i915_fence_reg *reg) | 
|  | 2329 | { | 
|  | 2330 | struct drm_gem_object *obj = reg->obj; | 
|  | 2331 | struct drm_device *dev = obj->dev; | 
|  | 2332 | drm_i915_private_t *dev_priv = dev->dev_private; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 2333 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2334 | int regnum = obj_priv->fence_reg; | 
| Jesse Barnes | 0f973f2 | 2009-01-26 17:10:45 -0800 | [diff] [blame] | 2335 | int tile_width; | 
| Eric Anholt | dc529a4 | 2009-03-10 22:34:49 -0700 | [diff] [blame] | 2336 | uint32_t fence_reg, val; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2337 | uint32_t pitch_val; | 
|  | 2338 |  | 
|  | 2339 | if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) || | 
|  | 2340 | (obj_priv->gtt_offset & (obj->size - 1))) { | 
| Linus Torvalds | f06da26 | 2009-02-09 08:57:29 -0800 | [diff] [blame] | 2341 | WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n", | 
| Jesse Barnes | 0f973f2 | 2009-01-26 17:10:45 -0800 | [diff] [blame] | 2342 | __func__, obj_priv->gtt_offset, obj->size); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2343 | return; | 
|  | 2344 | } | 
|  | 2345 |  | 
| Jesse Barnes | 0f973f2 | 2009-01-26 17:10:45 -0800 | [diff] [blame] | 2346 | if (obj_priv->tiling_mode == I915_TILING_Y && | 
|  | 2347 | HAS_128_BYTE_Y_TILING(dev)) | 
|  | 2348 | tile_width = 128; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2349 | else | 
| Jesse Barnes | 0f973f2 | 2009-01-26 17:10:45 -0800 | [diff] [blame] | 2350 | tile_width = 512; | 
|  | 2351 |  | 
|  | 2352 | /* Note: the pitch had better be a power-of-two number of tile widths */ | 
|  | 2353 | pitch_val = obj_priv->stride / tile_width; | 
|  | 2354 | pitch_val = ffs(pitch_val) - 1; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2355 |  | 
| Daniel Vetter | c36a2a6 | 2010-04-17 15:12:03 +0200 | [diff] [blame] | 2356 | if (obj_priv->tiling_mode == I915_TILING_Y && | 
|  | 2357 | HAS_128_BYTE_Y_TILING(dev)) | 
|  | 2358 | WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL); | 
|  | 2359 | else | 
|  | 2360 | WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL); | 
|  | 2361 |  | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2362 | val = obj_priv->gtt_offset; | 
|  | 2363 | if (obj_priv->tiling_mode == I915_TILING_Y) | 
|  | 2364 | val |= 1 << I830_FENCE_TILING_Y_SHIFT; | 
|  | 2365 | val |= I915_FENCE_SIZE_BITS(obj->size); | 
|  | 2366 | val |= pitch_val << I830_FENCE_PITCH_SHIFT; | 
|  | 2367 | val |= I830_FENCE_REG_VALID; | 
|  | 2368 |  | 
| Eric Anholt | dc529a4 | 2009-03-10 22:34:49 -0700 | [diff] [blame] | 2369 | if (regnum < 8) | 
|  | 2370 | fence_reg = FENCE_REG_830_0 + (regnum * 4); | 
|  | 2371 | else | 
|  | 2372 | fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4); | 
|  | 2373 | I915_WRITE(fence_reg, val); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2374 | } | 
|  | 2375 |  | 
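/*
 * Worked example (illustration only): an X-tiled surface with a
 * 2048 byte stride uses 512 byte wide tiles, so
 *
 *	pitch_val = ffs(2048 / 512) - 1 = ffs(4) - 1 = 2
 *
 * i.e. the fence encodes the pitch as log2 of the number of tile
 * widths, which is why the pitch must be a power-of-two multiple of
 * the tile width.
 */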
|  | 2376 | static void i830_write_fence_reg(struct drm_i915_fence_reg *reg) | 
|  | 2377 | { | 
|  | 2378 | struct drm_gem_object *obj = reg->obj; | 
|  | 2379 | struct drm_device *dev = obj->dev; | 
|  | 2380 | drm_i915_private_t *dev_priv = dev->dev_private; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 2381 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2382 | int regnum = obj_priv->fence_reg; | 
|  | 2383 | uint32_t val; | 
|  | 2384 | uint32_t pitch_val; | 
| Daniel Vetter | 8d7773a | 2009-03-29 14:09:41 +0200 | [diff] [blame] | 2385 | uint32_t fence_size_bits; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2386 |  | 
| Daniel Vetter | 8d7773a | 2009-03-29 14:09:41 +0200 | [diff] [blame] | 2387 | if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) || | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2388 | (obj_priv->gtt_offset & (obj->size - 1))) { | 
| Daniel Vetter | 8d7773a | 2009-03-29 14:09:41 +0200 | [diff] [blame] | 2389 | WARN(1, "%s: object 0x%08x not 512K or size aligned\n", | 
| Jesse Barnes | 0f973f2 | 2009-01-26 17:10:45 -0800 | [diff] [blame] | 2390 | __func__, obj_priv->gtt_offset); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2391 | return; | 
|  | 2392 | } | 
|  | 2393 |  | 
| Eric Anholt | e76a16d | 2009-05-26 17:44:56 -0700 | [diff] [blame] | 2394 | pitch_val = obj_priv->stride / 128; | 
|  | 2395 | pitch_val = ffs(pitch_val) - 1; | 
|  | 2396 | WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL); | 
|  | 2397 |  | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2398 | val = obj_priv->gtt_offset; | 
|  | 2399 | if (obj_priv->tiling_mode == I915_TILING_Y) | 
|  | 2400 | val |= 1 << I830_FENCE_TILING_Y_SHIFT; | 
| Daniel Vetter | 8d7773a | 2009-03-29 14:09:41 +0200 | [diff] [blame] | 2401 | fence_size_bits = I830_FENCE_SIZE_BITS(obj->size); | 
|  | 2402 | WARN_ON(fence_size_bits & ~0x00000f00); | 
|  | 2403 | val |= fence_size_bits; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2404 | val |= pitch_val << I830_FENCE_PITCH_SHIFT; | 
|  | 2405 | val |= I830_FENCE_REG_VALID; | 
|  | 2406 |  | 
|  | 2407 | I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2408 | } | 
|  | 2409 |  | 
| Daniel Vetter | ae3db24 | 2010-02-19 11:51:58 +0100 | [diff] [blame] | 2410 | static int i915_find_fence_reg(struct drm_device *dev) | 
|  | 2411 | { | 
|  | 2412 | struct drm_i915_fence_reg *reg = NULL; | 
|  | 2413 | struct drm_i915_gem_object *obj_priv = NULL; | 
|  | 2414 | struct drm_i915_private *dev_priv = dev->dev_private; | 
|  | 2415 | struct drm_gem_object *obj = NULL; | 
|  | 2416 | int i, avail, ret; | 
|  | 2417 |  | 
|  | 2418 | /* First try to find a free reg */ | 
|  | 2419 | avail = 0; | 
|  | 2420 | for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { | 
|  | 2421 | reg = &dev_priv->fence_regs[i]; | 
|  | 2422 | if (!reg->obj) | 
|  | 2423 | return i; | 
|  | 2424 |  | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 2425 | obj_priv = to_intel_bo(reg->obj); | 
| Daniel Vetter | ae3db24 | 2010-02-19 11:51:58 +0100 | [diff] [blame] | 2426 | if (!obj_priv->pin_count) | 
|  | 2427 | avail++; | 
|  | 2428 | } | 
|  | 2429 |  | 
|  | 2430 | if (avail == 0) | 
|  | 2431 | return -ENOSPC; | 
|  | 2432 |  | 
|  | 2433 | /* None available, try to steal one or wait for a user to finish */ | 
|  | 2434 | i = I915_FENCE_REG_NONE; | 
| Daniel Vetter | 007cc8a | 2010-04-28 11:02:31 +0200 | [diff] [blame] | 2435 | list_for_each_entry(reg, &dev_priv->mm.fence_list, | 
|  | 2436 | lru_list) { | 
|  | 2437 | obj = reg->obj; | 
|  | 2438 | obj_priv = to_intel_bo(obj); | 
| Daniel Vetter | ae3db24 | 2010-02-19 11:51:58 +0100 | [diff] [blame] | 2439 |  | 
|  | 2440 | if (obj_priv->pin_count) | 
|  | 2441 | continue; | 
|  | 2442 |  | 
|  | 2443 | /* found one! */ | 
|  | 2444 | i = obj_priv->fence_reg; | 
|  | 2445 | break; | 
|  | 2446 | } | 
|  | 2447 |  | 
|  | 2448 | BUG_ON(i == I915_FENCE_REG_NONE); | 
|  | 2449 |  | 
|  | 2450 | /* We only have a reference on obj from the active list. put_fence_reg | 
|  | 2451 | * might drop that one, causing a use-after-free in it. So hold a | 
|  | 2452 | * private reference to obj like the other callers of put_fence_reg | 
|  | 2453 | * (set_tiling ioctl) do. */ | 
|  | 2454 | drm_gem_object_reference(obj); | 
|  | 2455 | ret = i915_gem_object_put_fence_reg(obj); | 
|  | 2456 | drm_gem_object_unreference(obj); | 
|  | 2457 | if (ret != 0) | 
|  | 2458 | return ret; | 
|  | 2459 |  | 
|  | 2460 | return i; | 
|  | 2461 | } | 
|  | 2462 |  | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2463 | /** | 
|  | 2464 | * i915_gem_object_get_fence_reg - set up a fence reg for an object | 
|  | 2465 | * @obj: object to map through a fence reg | 
|  | 2466 | * | 
|  | 2467 | * When mapping objects through the GTT, userspace wants to be able to write | 
|  | 2468 | * to them without having to worry about swizzling if the object is tiled. | 
|  | 2469 | * | 
|  | 2470 | * This function walks the fence regs looking for a free one for @obj, | 
|  | 2471 | * stealing one if it can't find any. | 
|  | 2472 | * | 
|  | 2473 | * It then sets up the reg based on the object's properties: address, pitch | 
|  | 2474 | * and tiling format. | 
|  | 2475 | */ | 
| Chris Wilson | 8c4b8c3 | 2009-06-17 22:08:52 +0100 | [diff] [blame] | 2476 | int | 
|  | 2477 | i915_gem_object_get_fence_reg(struct drm_gem_object *obj) | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2478 | { | 
|  | 2479 | struct drm_device *dev = obj->dev; | 
| Jesse Barnes | 79e5394 | 2008-11-07 14:24:08 -0800 | [diff] [blame] | 2480 | struct drm_i915_private *dev_priv = dev->dev_private; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 2481 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2482 | struct drm_i915_fence_reg *reg = NULL; | 
| Daniel Vetter | ae3db24 | 2010-02-19 11:51:58 +0100 | [diff] [blame] | 2483 | int ret; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2484 |  | 
| Eric Anholt | a09ba7f | 2009-08-29 12:49:51 -0700 | [diff] [blame] | 2485 | /* Just update our place in the LRU if our fence is getting used. */ | 
|  | 2486 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { | 
| Daniel Vetter | 007cc8a | 2010-04-28 11:02:31 +0200 | [diff] [blame] | 2487 | reg = &dev_priv->fence_regs[obj_priv->fence_reg]; | 
|  | 2488 | list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list); | 
| Eric Anholt | a09ba7f | 2009-08-29 12:49:51 -0700 | [diff] [blame] | 2489 | return 0; | 
|  | 2490 | } | 
|  | 2491 |  | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2492 | switch (obj_priv->tiling_mode) { | 
|  | 2493 | case I915_TILING_NONE: | 
|  | 2494 | WARN(1, "allocating a fence for non-tiled object?\n"); | 
|  | 2495 | break; | 
|  | 2496 | case I915_TILING_X: | 
| Jesse Barnes | 0f973f2 | 2009-01-26 17:10:45 -0800 | [diff] [blame] | 2497 | if (!obj_priv->stride) | 
|  | 2498 | return -EINVAL; | 
|  | 2499 | WARN((obj_priv->stride & (512 - 1)), | 
|  | 2500 | "object 0x%08x is X tiled but has non-512B pitch\n", | 
|  | 2501 | obj_priv->gtt_offset); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2502 | break; | 
|  | 2503 | case I915_TILING_Y: | 
| Jesse Barnes | 0f973f2 | 2009-01-26 17:10:45 -0800 | [diff] [blame] | 2504 | if (!obj_priv->stride) | 
|  | 2505 | return -EINVAL; | 
|  | 2506 | WARN((obj_priv->stride & (128 - 1)), | 
|  | 2507 | "object 0x%08x is Y tiled but has non-128B pitch\n", | 
|  | 2508 | obj_priv->gtt_offset); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2509 | break; | 
|  | 2510 | } | 
|  | 2511 |  | 
| Daniel Vetter | ae3db24 | 2010-02-19 11:51:58 +0100 | [diff] [blame] | 2512 | ret = i915_find_fence_reg(dev); | 
|  | 2513 | if (ret < 0) | 
|  | 2514 | return ret; | 
| Chris Wilson | fc7170b | 2009-02-11 14:26:46 +0000 | [diff] [blame] | 2515 |  | 
| Daniel Vetter | ae3db24 | 2010-02-19 11:51:58 +0100 | [diff] [blame] | 2516 | obj_priv->fence_reg = ret; | 
|  | 2517 | reg = &dev_priv->fence_regs[obj_priv->fence_reg]; | 
| Daniel Vetter | 007cc8a | 2010-04-28 11:02:31 +0200 | [diff] [blame] | 2518 | list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list); | 
| Eric Anholt | a09ba7f | 2009-08-29 12:49:51 -0700 | [diff] [blame] | 2519 |  | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2520 | reg->obj = obj; | 
|  | 2521 |  | 
| Eric Anholt | 4e901fd | 2009-10-26 16:44:17 -0700 | [diff] [blame] | 2522 | if (IS_GEN6(dev)) | 
|  | 2523 | sandybridge_write_fence_reg(reg); | 
|  | 2524 | else if (IS_I965G(dev)) | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2525 | i965_write_fence_reg(reg); | 
|  | 2526 | else if (IS_I9XX(dev)) | 
|  | 2527 | i915_write_fence_reg(reg); | 
|  | 2528 | else | 
|  | 2529 | i830_write_fence_reg(reg); | 
| Eric Anholt | d9ddcb9 | 2009-01-27 10:33:49 -0800 | [diff] [blame] | 2530 |  | 
| Daniel Vetter | ae3db24 | 2010-02-19 11:51:58 +0100 | [diff] [blame] | 2531 | trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg, | 
|  | 2532 | obj_priv->tiling_mode); | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 2533 |  | 
| Eric Anholt | d9ddcb9 | 2009-01-27 10:33:49 -0800 | [diff] [blame] | 2534 | return 0; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2535 | } | 
|  | 2536 |  | 
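/*
 * Example (sketch, assuming struct_mutex is held as for all GEM state
 * changes): a caller about to perform fenced GTT access to a tiled
 * object would do
 *
 *	if (obj_priv->tiling_mode != I915_TILING_NONE) {
 *		ret = i915_gem_object_get_fence_reg(obj);
 *		if (ret)
 *			return ret;
 *	}
 *
 * untiled objects never need a fence, so the common path skips the
 * allocation entirely.
 */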
|  | 2537 | /** | 
|  | 2538 | * i915_gem_clear_fence_reg - clear out fence register info | 
|  | 2539 | * @obj: object to clear | 
|  | 2540 | * | 
|  | 2541 | * Zeroes out the fence register itself and clears out the associated | 
|  | 2542 | * data structures in dev_priv and obj_priv. | 
|  | 2543 | */ | 
|  | 2544 | static void | 
|  | 2545 | i915_gem_clear_fence_reg(struct drm_gem_object *obj) | 
|  | 2546 | { | 
|  | 2547 | struct drm_device *dev = obj->dev; | 
| Jesse Barnes | 79e5394 | 2008-11-07 14:24:08 -0800 | [diff] [blame] | 2548 | drm_i915_private_t *dev_priv = dev->dev_private; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 2549 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Daniel Vetter | 007cc8a | 2010-04-28 11:02:31 +0200 | [diff] [blame] | 2550 | struct drm_i915_fence_reg *reg = | 
|  | 2551 | &dev_priv->fence_regs[obj_priv->fence_reg]; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2552 |  | 
| Eric Anholt | 4e901fd | 2009-10-26 16:44:17 -0700 | [diff] [blame] | 2553 | if (IS_GEN6(dev)) { | 
|  | 2554 | I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + | 
|  | 2555 | (obj_priv->fence_reg * 8), 0); | 
|  | 2556 | } else if (IS_I965G(dev)) { | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2557 | I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); | 
| Eric Anholt | 4e901fd | 2009-10-26 16:44:17 -0700 | [diff] [blame] | 2558 | } else { | 
| Eric Anholt | dc529a4 | 2009-03-10 22:34:49 -0700 | [diff] [blame] | 2559 | uint32_t fence_reg; | 
|  | 2560 |  | 
|  | 2561 | if (obj_priv->fence_reg < 8) | 
|  | 2562 | fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4; | 
|  | 2563 | else | 
|  | 2564 | fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - | 
|  | 2565 | 8) * 4; | 
|  | 2566 |  | 
|  | 2567 | I915_WRITE(fence_reg, 0); | 
|  | 2568 | } | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2569 |  | 
| Daniel Vetter | 007cc8a | 2010-04-28 11:02:31 +0200 | [diff] [blame] | 2570 | reg->obj = NULL; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2571 | obj_priv->fence_reg = I915_FENCE_REG_NONE; | 
| Daniel Vetter | 007cc8a | 2010-04-28 11:02:31 +0200 | [diff] [blame] | 2572 | list_del_init(&reg->lru_list); | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 2573 | } | 
|  | 2574 |  | 
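/*
 * Worked example (illustration only): only the first eight fences sit
 * at FENCE_REG_830_0, so on a chipset with more than eight fence
 * registers, fence 10 is cleared by writing 0 to
 * FENCE_REG_945_8 + (10 - 8) * 4.
 */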
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2575 | /** | 
| Chris Wilson | 52dc7d3 | 2009-06-06 09:46:01 +0100 | [diff] [blame] | 2576 | * i915_gem_object_put_fence_reg - waits on outstanding fenced access | 
|  | 2577 | * to the buffer to finish, and then resets the fence register. | 
|  | 2578 | * @obj: tiled object holding a fence register. | 
|  | 2579 | * | 
|  | 2580 | * Zeroes out the fence register itself and clears out the associated | 
|  | 2581 | * data structures in dev_priv and obj_priv. | 
|  | 2582 | */ | 
|  | 2583 | int | 
|  | 2584 | i915_gem_object_put_fence_reg(struct drm_gem_object *obj) | 
|  | 2585 | { | 
|  | 2586 | struct drm_device *dev = obj->dev; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 2587 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Chris Wilson | 52dc7d3 | 2009-06-06 09:46:01 +0100 | [diff] [blame] | 2588 |  | 
|  | 2589 | if (obj_priv->fence_reg == I915_FENCE_REG_NONE) | 
|  | 2590 | return 0; | 
|  | 2591 |  | 
| Daniel Vetter | 10ae9bd | 2010-02-01 13:59:17 +0100 | [diff] [blame] | 2592 | /* If we've changed tiling, GTT-mappings of the object | 
|  | 2593 | * need to re-fault to ensure that the correct fence register | 
|  | 2594 | * setup is in place. | 
|  | 2595 | */ | 
|  | 2596 | i915_gem_release_mmap(obj); | 
|  | 2597 |  | 
| Chris Wilson | 52dc7d3 | 2009-06-06 09:46:01 +0100 | [diff] [blame] | 2598 | /* On the i915, GPU access to tiled buffers is via a fence; | 
|  | 2599 | * therefore we must wait for any outstanding access to complete | 
|  | 2600 | * before clearing the fence. | 
|  | 2601 | */ | 
|  | 2602 | if (!IS_I965G(dev)) { | 
|  | 2603 | int ret; | 
|  | 2604 |  | 
| Chris Wilson | 2dafb1e | 2010-06-07 14:03:05 +0100 | [diff] [blame] | 2605 | ret = i915_gem_object_flush_gpu_write_domain(obj); | 
|  | 2606 | if (ret != 0) | 
|  | 2607 | return ret; | 
|  | 2608 |  | 
| Chris Wilson | 52dc7d3 | 2009-06-06 09:46:01 +0100 | [diff] [blame] | 2609 | ret = i915_gem_object_wait_rendering(obj); | 
|  | 2610 | if (ret != 0) | 
|  | 2611 | return ret; | 
|  | 2612 | } | 
|  | 2613 |  | 
| Daniel Vetter | 4a72661 | 2010-02-01 13:59:16 +0100 | [diff] [blame] | 2614 | i915_gem_object_flush_gtt_write_domain(obj); | 
| Chris Wilson | 52dc7d3 | 2009-06-06 09:46:01 +0100 | [diff] [blame] | 2615 | i915_gem_clear_fence_reg(obj); | 
|  | 2616 |  | 
|  | 2617 | return 0; | 
|  | 2618 | } | 
|  | 2619 |  | 
|  | 2620 | /** | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2621 | * Finds free space in the GTT aperture and binds the object there. | 
|  | 2622 | */ | 
|  | 2623 | static int | 
|  | 2624 | i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | 
|  | 2625 | { | 
|  | 2626 | struct drm_device *dev = obj->dev; | 
|  | 2627 | drm_i915_private_t *dev_priv = dev->dev_private; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 2628 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2629 | struct drm_mm_node *free_space; | 
| Chris Wilson | 4bdadb9 | 2010-01-27 13:36:32 +0000 | [diff] [blame] | 2630 | gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2631 | int ret; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2632 |  | 
| Chris Wilson | bb6baf7 | 2009-09-22 14:24:13 +0100 | [diff] [blame] | 2633 | if (obj_priv->madv != I915_MADV_WILLNEED) { | 
| Chris Wilson | 3ef94da | 2009-09-14 16:50:29 +0100 | [diff] [blame] | 2634 | DRM_ERROR("Attempting to bind a purgeable object\n"); | 
|  | 2635 | return -EINVAL; | 
|  | 2636 | } | 
|  | 2637 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2638 | if (alignment == 0) | 
| Jesse Barnes | 0f973f2 | 2009-01-26 17:10:45 -0800 | [diff] [blame] | 2639 | alignment = i915_gem_get_gtt_alignment(obj); | 
| Daniel Vetter | 8d7773a | 2009-03-29 14:09:41 +0200 | [diff] [blame] | 2640 | if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) { | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2641 | DRM_ERROR("Invalid object alignment requested %u\n", alignment); | 
|  | 2642 | return -EINVAL; | 
|  | 2643 | } | 
|  | 2644 |  | 
| Chris Wilson | 654fc60 | 2010-05-27 13:18:21 +0100 | [diff] [blame] | 2645 | /* If the object is bigger than the entire aperture, reject it early | 
|  | 2646 | * before evicting everything in a vain attempt to find space. | 
|  | 2647 | */ | 
|  | 2648 | if (obj->size > dev->gtt_total) { | 
|  | 2649 | DRM_ERROR("Attempting to bind an object larger than the aperture\n"); | 
|  | 2650 | return -E2BIG; | 
|  | 2651 | } | 
|  | 2652 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2653 | search_free: | 
|  | 2654 | free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, | 
|  | 2655 | obj->size, alignment, 0); | 
|  | 2656 | if (free_space != NULL) { | 
|  | 2657 | obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size, | 
|  | 2658 | alignment); | 
| Daniel Vetter | db3307a | 2010-07-02 15:02:12 +0100 | [diff] [blame] | 2659 | if (obj_priv->gtt_space != NULL) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2660 | obj_priv->gtt_offset = obj_priv->gtt_space->start; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2661 | } | 
|  | 2662 | if (obj_priv->gtt_space == NULL) { | 
|  | 2663 | /* If the gtt is empty and we're still having trouble | 
|  | 2664 | * fitting our object in, we're out of memory. | 
|  | 2665 | */ | 
|  | 2666 | #if WATCH_LRU | 
|  | 2667 | DRM_INFO("%s: GTT full, evicting something\n", __func__); | 
|  | 2668 | #endif | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2669 | ret = i915_gem_evict_something(dev, obj->size); | 
| Chris Wilson | 9731129 | 2009-09-21 00:22:34 +0100 | [diff] [blame] | 2670 | if (ret) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2671 | return ret; | 
| Chris Wilson | 9731129 | 2009-09-21 00:22:34 +0100 | [diff] [blame] | 2672 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2673 | goto search_free; | 
|  | 2674 | } | 
|  | 2675 |  | 
|  | 2676 | #if WATCH_BUF | 
| Krzysztof Halasa | cfd43c0 | 2009-06-20 00:31:28 +0200 | [diff] [blame] | 2677 | DRM_INFO("Binding object of size %zd at 0x%08x\n", | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2678 | obj->size, obj_priv->gtt_offset); | 
|  | 2679 | #endif | 
| Chris Wilson | 4bdadb9 | 2010-01-27 13:36:32 +0000 | [diff] [blame] | 2680 | ret = i915_gem_object_get_pages(obj, gfpmask); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2681 | if (ret) { | 
|  | 2682 | drm_mm_put_block(obj_priv->gtt_space); | 
|  | 2683 | obj_priv->gtt_space = NULL; | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2684 |  | 
|  | 2685 | if (ret == -ENOMEM) { | 
|  | 2686 | /* first try to clear up some space from the GTT */ | 
|  | 2687 | ret = i915_gem_evict_something(dev, obj->size); | 
|  | 2688 | if (ret) { | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2689 | /* now try to shrink everyone else */ | 
| Chris Wilson | 4bdadb9 | 2010-01-27 13:36:32 +0000 | [diff] [blame] | 2690 | if (gfpmask) { | 
|  | 2691 | gfpmask = 0; | 
|  | 2692 | goto search_free; | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2693 | } | 
|  | 2694 |  | 
|  | 2695 | return ret; | 
|  | 2696 | } | 
|  | 2697 |  | 
|  | 2698 | goto search_free; | 
|  | 2699 | } | 
|  | 2700 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2701 | return ret; | 
|  | 2702 | } | 
|  | 2703 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2704 | /* Create an AGP memory structure pointing at our pages, and bind it | 
|  | 2705 | * into the GTT. | 
|  | 2706 | */ | 
|  | 2707 | obj_priv->agp_mem = drm_agp_bind_pages(dev, | 
| Eric Anholt | 856fa19 | 2009-03-19 14:10:50 -0700 | [diff] [blame] | 2708 | obj_priv->pages, | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2709 | obj->size >> PAGE_SHIFT, | 
| Keith Packard | ba1eb1d | 2008-10-14 19:55:10 -0700 | [diff] [blame] | 2710 | obj_priv->gtt_offset, | 
|  | 2711 | obj_priv->agp_type); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2712 | if (obj_priv->agp_mem == NULL) { | 
| Eric Anholt | 856fa19 | 2009-03-19 14:10:50 -0700 | [diff] [blame] | 2713 | i915_gem_object_put_pages(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2714 | drm_mm_put_block(obj_priv->gtt_space); | 
|  | 2715 | obj_priv->gtt_space = NULL; | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2716 |  | 
|  | 2717 | ret = i915_gem_evict_something(dev, obj->size); | 
| Chris Wilson | 9731129 | 2009-09-21 00:22:34 +0100 | [diff] [blame] | 2718 | if (ret) | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2719 | return ret; | 
| Chris Wilson | 07f73f6 | 2009-09-14 16:50:30 +0100 | [diff] [blame] | 2720 |  | 
|  | 2721 | goto search_free; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2722 | } | 
|  | 2723 | atomic_inc(&dev->gtt_count); | 
|  | 2724 | atomic_add(obj->size, &dev->gtt_memory); | 
|  | 2725 |  | 
|  | 2726 | /* Assert that the object is not currently in any GPU domain. As it | 
|  | 2727 | * wasn't in the GTT, there shouldn't be any way it could have been in | 
|  | 2728 | * a GPU cache. | 
|  | 2729 | */ | 
| Chris Wilson | 21d509e | 2009-06-06 09:46:02 +0100 | [diff] [blame] | 2730 | BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); | 
|  | 2731 | BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2732 |  | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 2733 | trace_i915_gem_object_bind(obj, obj_priv->gtt_offset); | 
|  | 2734 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2735 | return 0; | 
|  | 2736 | } | 
|  | 2737 |  | 
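/*
 * Example (illustration only): the "alignment & (required - 1)" test
 * above relies on the required alignment being a power of two, so the
 * AND yields the remainder; e.g. requesting alignment 2048 for an
 * object whose minimum is 4096 gives 0x800 & 0xfff == 0x800, and the
 * bind is rejected with -EINVAL.
 */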
|  | 2738 | void | 
|  | 2739 | i915_gem_clflush_object(struct drm_gem_object *obj) | 
|  | 2740 | { | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 2741 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2742 |  | 
|  | 2743 | /* If we don't have a page list set up, then we're not pinned | 
|  | 2744 | * to GPU, and we can ignore the cache flush because it'll happen | 
|  | 2745 | * again at bind time. | 
|  | 2746 | */ | 
| Eric Anholt | 856fa19 | 2009-03-19 14:10:50 -0700 | [diff] [blame] | 2747 | if (obj_priv->pages == NULL) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2748 | return; | 
|  | 2749 |  | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 2750 | trace_i915_gem_object_clflush(obj); | 
| Eric Anholt | cfa16a0 | 2009-05-26 18:46:16 -0700 | [diff] [blame] | 2751 |  | 
| Eric Anholt | 856fa19 | 2009-03-19 14:10:50 -0700 | [diff] [blame] | 2752 | drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 2753 | } | 
|  | 2754 |  | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2755 | /** Flushes any GPU write domain for the object if it's dirty. */ | 
| Chris Wilson | 2dafb1e | 2010-06-07 14:03:05 +0100 | [diff] [blame] | 2756 | static int | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2757 | i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) | 
|  | 2758 | { | 
|  | 2759 | struct drm_device *dev = obj->dev; | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 2760 | uint32_t old_write_domain; | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 2761 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2762 |  | 
|  | 2763 | if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) | 
| Chris Wilson | 2dafb1e | 2010-06-07 14:03:05 +0100 | [diff] [blame] | 2764 | return 0; | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2765 |  | 
|  | 2766 | /* Queue the GPU write cache flushing we need. */ | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 2767 | old_write_domain = obj->write_domain; | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2768 | i915_gem_flush(dev, 0, obj->write_domain); | 
| Chris Wilson | 2dafb1e | 2010-06-07 14:03:05 +0100 | [diff] [blame] | 2769 | if (i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring) == 0) | 
|  | 2770 | return -ENOMEM; | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 2771 |  | 
|  | 2772 | trace_i915_gem_object_change_domain(obj, | 
|  | 2773 | obj->read_domains, | 
|  | 2774 | old_write_domain); | 
| Chris Wilson | 2dafb1e | 2010-06-07 14:03:05 +0100 | [diff] [blame] | 2775 | return 0; | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2776 | } | 
|  | 2777 |  | 
|  | 2778 | /** Flushes the GTT write domain for the object if it's dirty. */ | 
|  | 2779 | static void | 
|  | 2780 | i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj) | 
|  | 2781 | { | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 2782 | uint32_t old_write_domain; | 
|  | 2783 |  | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2784 | if (obj->write_domain != I915_GEM_DOMAIN_GTT) | 
|  | 2785 | return; | 
|  | 2786 |  | 
|  | 2787 | /* No actual flushing is required for the GTT write domain. Writes | 
|  | 2788 | * to it immediately go to main memory as far as we know, so there's | 
|  | 2789 | * no chipset flush.  It also doesn't land in render cache. | 
|  | 2790 | */ | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 2791 | old_write_domain = obj->write_domain; | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2792 | obj->write_domain = 0; | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 2793 |  | 
|  | 2794 | trace_i915_gem_object_change_domain(obj, | 
|  | 2795 | obj->read_domains, | 
|  | 2796 | old_write_domain); | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2797 | } | 
|  | 2798 |  | 
|  | 2799 | /** Flushes the CPU write domain for the object if it's dirty. */ | 
|  | 2800 | static void | 
|  | 2801 | i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) | 
|  | 2802 | { | 
|  | 2803 | struct drm_device *dev = obj->dev; | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 2804 | uint32_t old_write_domain; | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2805 |  | 
|  | 2806 | if (obj->write_domain != I915_GEM_DOMAIN_CPU) | 
|  | 2807 | return; | 
|  | 2808 |  | 
|  | 2809 | i915_gem_clflush_object(obj); | 
|  | 2810 | drm_agp_chipset_flush(dev); | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 2811 | old_write_domain = obj->write_domain; | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2812 | obj->write_domain = 0; | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 2813 |  | 
|  | 2814 | trace_i915_gem_object_change_domain(obj, | 
|  | 2815 | obj->read_domains, | 
|  | 2816 | old_write_domain); | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2817 | } | 
|  | 2818 |  | 
| Chris Wilson | 2dafb1e | 2010-06-07 14:03:05 +0100 | [diff] [blame] | 2819 | int | 
| Kristian Høgsberg | 6b95a20 | 2009-11-18 11:25:18 -0500 | [diff] [blame] | 2820 | i915_gem_object_flush_write_domain(struct drm_gem_object *obj) | 
|  | 2821 | { | 
| Chris Wilson | 2dafb1e | 2010-06-07 14:03:05 +0100 | [diff] [blame] | 2822 | int ret = 0; | 
|  | 2823 |  | 
| Kristian Høgsberg | 6b95a20 | 2009-11-18 11:25:18 -0500 | [diff] [blame] | 2824 | switch (obj->write_domain) { | 
|  | 2825 | case I915_GEM_DOMAIN_GTT: | 
|  | 2826 | i915_gem_object_flush_gtt_write_domain(obj); | 
|  | 2827 | break; | 
|  | 2828 | case I915_GEM_DOMAIN_CPU: | 
|  | 2829 | i915_gem_object_flush_cpu_write_domain(obj); | 
|  | 2830 | break; | 
|  | 2831 | default: | 
| Chris Wilson | 2dafb1e | 2010-06-07 14:03:05 +0100 | [diff] [blame] | 2832 | ret = i915_gem_object_flush_gpu_write_domain(obj); | 
| Kristian Høgsberg | 6b95a20 | 2009-11-18 11:25:18 -0500 | [diff] [blame] | 2833 | break; | 
|  | 2834 | } | 
| Chris Wilson | 2dafb1e | 2010-06-07 14:03:05 +0100 | [diff] [blame] | 2835 |  | 
|  | 2836 | return ret; | 
| Kristian Høgsberg | 6b95a20 | 2009-11-18 11:25:18 -0500 | [diff] [blame] | 2837 | } | 
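|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative usage (not part of the original source): a caller that | 
|  |  | * must drain outstanding writes before handing the object elsewhere | 
|  |  | * would do | 
|  |  | * | 
|  |  | *	ret = i915_gem_object_flush_write_domain(obj); | 
|  |  | *	if (ret) | 
|  |  | *		return ret; | 
|  |  | * | 
|  |  | * The GTT and CPU branches above cannot fail, so a nonzero return | 
|  |  | * always comes from the GPU write-domain flush. | 
|  |  | */ | 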
|  | 2838 |  | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 2839 | /** | 
|  | 2840 | * Moves a single object to the GTT read, and possibly write, domain. | 
|  | 2841 | * | 
|  | 2842 | * This function returns when the move is complete, including waiting on | 
|  | 2843 | * flushes to occur. | 
|  | 2844 | */ | 
| Jesse Barnes | 79e5394 | 2008-11-07 14:24:08 -0800 | [diff] [blame] | 2845 | int | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 2846 | i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) | 
|  | 2847 | { | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 2848 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 2849 | uint32_t old_write_domain, old_read_domains; | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2850 | int ret; | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 2851 |  | 
| Eric Anholt | 0235439 | 2008-11-26 13:58:13 -0800 | [diff] [blame] | 2852 | /* Not valid to be called on unbound objects. */ | 
|  | 2853 | if (obj_priv->gtt_space == NULL) | 
|  | 2854 | return -EINVAL; | 
|  | 2855 |  | 
| Chris Wilson | 2dafb1e | 2010-06-07 14:03:05 +0100 | [diff] [blame] | 2856 | ret = i915_gem_object_flush_gpu_write_domain(obj); | 
|  | 2857 | if (ret != 0) | 
|  | 2858 | return ret; | 
|  | 2859 |  | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 2860 | /* Wait on any GPU rendering and flushing to occur. */ | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2861 | ret = i915_gem_object_wait_rendering(obj); | 
|  | 2862 | if (ret != 0) | 
|  | 2863 | return ret; | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 2864 |  | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 2865 | old_write_domain = obj->write_domain; | 
|  | 2866 | old_read_domains = obj->read_domains; | 
|  | 2867 |  | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 2868 | /* If we're writing through the GTT domain, then CPU and GPU caches | 
|  | 2869 | * will need to be invalidated at next use. | 
|  | 2870 | */ | 
|  | 2871 | if (write) | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2872 | obj->read_domains &= I915_GEM_DOMAIN_GTT; | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 2873 |  | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2874 | i915_gem_object_flush_cpu_write_domain(obj); | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 2875 |  | 
|  | 2876 | /* It should now be out of any other write domains, and we can update | 
|  | 2877 | * the domain values for our changes. | 
|  | 2878 | */ | 
|  | 2879 | BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); | 
|  | 2880 | obj->read_domains |= I915_GEM_DOMAIN_GTT; | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2881 | if (write) { | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 2882 | obj->write_domain = I915_GEM_DOMAIN_GTT; | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2883 | obj_priv->dirty = 1; | 
|  | 2884 | } | 
|  | 2885 |  | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 2886 | trace_i915_gem_object_change_domain(obj, | 
|  | 2887 | old_read_domains, | 
|  | 2888 | old_write_domain); | 
|  | 2889 |  | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2890 | return 0; | 
|  | 2891 | } | 
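|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative sketch (not in the original source): the usual caller | 
|  |  | * pattern for writing through the aperture, mirroring what the | 
|  |  | * relocation code later in this file does per entry.  "page_base", | 
|  |  | * "page_offset" and "value" are hypothetical names; error handling is | 
|  |  | * elided. | 
|  |  | * | 
|  |  | *	ret = i915_gem_object_pin(obj, alignment); | 
|  |  | *	if (ret == 0) | 
|  |  | *		ret = i915_gem_object_set_to_gtt_domain(obj, 1); | 
|  |  | *	if (ret == 0) { | 
|  |  | *		vaddr = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, | 
|  |  | *						 page_base, KM_USER0); | 
|  |  | *		writel(value, vaddr + page_offset); | 
|  |  | *		io_mapping_unmap_atomic(vaddr, KM_USER0); | 
|  |  | *	} | 
|  |  | */ | 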
|  | 2892 |  | 
| Zhenyu Wang | b9241ea | 2009-11-25 13:09:39 +0800 | [diff] [blame] | 2893 | /* | 
|  | 2894 | * Prepare buffer for the display plane. Wait uninterruptibly for any | 
|  | 2895 | * required flush, as we're not supposed to be interrupted during the | 
|  |  | * modesetting process. | 
|  | 2896 | */ | 
|  | 2897 | int | 
|  | 2898 | i915_gem_object_set_to_display_plane(struct drm_gem_object *obj) | 
|  | 2899 | { | 
|  | 2900 | struct drm_device *dev = obj->dev; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 2901 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Zhenyu Wang | b9241ea | 2009-11-25 13:09:39 +0800 | [diff] [blame] | 2902 | uint32_t old_write_domain, old_read_domains; | 
|  | 2903 | int ret; | 
|  | 2904 |  | 
|  | 2905 | /* Not valid to be called on unbound objects. */ | 
|  | 2906 | if (obj_priv->gtt_space == NULL) | 
|  | 2907 | return -EINVAL; | 
|  | 2908 |  | 
| Chris Wilson | 2dafb1e | 2010-06-07 14:03:05 +0100 | [diff] [blame] | 2909 | ret = i915_gem_object_flush_gpu_write_domain(obj); | 
|  | 2910 | if (ret) | 
|  | 2911 | return ret; | 
| Zhenyu Wang | b9241ea | 2009-11-25 13:09:39 +0800 | [diff] [blame] | 2912 |  | 
|  | 2913 | /* Wait on any GPU rendering and flushing to occur. */ | 
|  | 2914 | if (obj_priv->active) { | 
|  | 2915 | #if WATCH_BUF | 
|  | 2916 | DRM_INFO("%s: object %p wait for seqno %08x\n", | 
|  | 2917 | __func__, obj, obj_priv->last_rendering_seqno); | 
|  | 2918 | #endif | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 2919 | ret = i915_do_wait_request(dev, | 
|  | 2920 | obj_priv->last_rendering_seqno, | 
|  | 2921 | 0, | 
|  | 2922 | obj_priv->ring); | 
| Zhenyu Wang | b9241ea | 2009-11-25 13:09:39 +0800 | [diff] [blame] | 2923 | if (ret != 0) | 
|  | 2924 | return ret; | 
|  | 2925 | } | 
|  | 2926 |  | 
| Chris Wilson | b118c1e | 2010-05-27 13:18:14 +0100 | [diff] [blame] | 2927 | i915_gem_object_flush_cpu_write_domain(obj); | 
|  | 2928 |  | 
| Zhenyu Wang | b9241ea | 2009-11-25 13:09:39 +0800 | [diff] [blame] | 2929 | old_write_domain = obj->write_domain; | 
|  | 2930 | old_read_domains = obj->read_domains; | 
|  | 2931 |  | 
| Zhenyu Wang | b9241ea | 2009-11-25 13:09:39 +0800 | [diff] [blame] | 2932 | /* It should now be out of any other write domains, and we can update | 
|  | 2933 | * the domain values for our changes. | 
|  | 2934 | */ | 
|  | 2935 | BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); | 
| Chris Wilson | b118c1e | 2010-05-27 13:18:14 +0100 | [diff] [blame] | 2936 | obj->read_domains = I915_GEM_DOMAIN_GTT; | 
| Zhenyu Wang | b9241ea | 2009-11-25 13:09:39 +0800 | [diff] [blame] | 2937 | obj->write_domain = I915_GEM_DOMAIN_GTT; | 
|  | 2938 | obj_priv->dirty = 1; | 
|  | 2939 |  | 
|  | 2940 | trace_i915_gem_object_change_domain(obj, | 
|  | 2941 | old_read_domains, | 
|  | 2942 | old_write_domain); | 
|  | 2943 |  | 
|  | 2944 | return 0; | 
|  | 2945 | } | 
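|  |  |  | 
|  |  | /* | 
|  |  | * Sketch (illustrative): modesetting code uses this in place of the | 
|  |  | * interruptible GTT move above, e.g. | 
|  |  | * | 
|  |  | *	ret = i915_gem_object_pin(obj, alignment); | 
|  |  | *	if (ret == 0) | 
|  |  | *		ret = i915_gem_object_set_to_display_plane(obj); | 
|  |  | * | 
|  |  | * Note that the wait above passes interruptible == 0 and that | 
|  |  | * read_domains is collapsed to GTT alone, unlike in | 
|  |  | * i915_gem_object_set_to_gtt_domain(). | 
|  |  | */ | 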
|  | 2946 |  | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2947 | /** | 
|  | 2948 | * Moves a single object to the CPU read, and possibly write, domain. | 
|  | 2949 | * | 
|  | 2950 | * This function returns when the move is complete, including waiting on | 
|  | 2951 | * flushes to occur. | 
|  | 2952 | */ | 
|  | 2953 | static int | 
|  | 2954 | i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) | 
|  | 2955 | { | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 2956 | uint32_t old_write_domain, old_read_domains; | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2957 | int ret; | 
|  | 2958 |  | 
| Chris Wilson | 2dafb1e | 2010-06-07 14:03:05 +0100 | [diff] [blame] | 2959 | ret = i915_gem_object_flush_gpu_write_domain(obj); | 
|  | 2960 | if (ret) | 
|  | 2961 | return ret; | 
|  | 2962 |  | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2963 | /* Wait on any GPU rendering and flushing to occur. */ | 
|  | 2964 | ret = i915_gem_object_wait_rendering(obj); | 
|  | 2965 | if (ret != 0) | 
|  | 2966 | return ret; | 
|  | 2967 |  | 
|  | 2968 | i915_gem_object_flush_gtt_write_domain(obj); | 
|  | 2969 |  | 
|  | 2970 | /* If we have a partially-valid cache of the object in the CPU, | 
|  | 2971 | * finish invalidating it and free the per-page flags. | 
|  | 2972 | */ | 
|  | 2973 | i915_gem_object_set_to_full_cpu_read_domain(obj); | 
|  | 2974 |  | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 2975 | old_write_domain = obj->write_domain; | 
|  | 2976 | old_read_domains = obj->read_domains; | 
|  | 2977 |  | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2978 | /* Flush the CPU cache if it's still invalid. */ | 
|  | 2979 | if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { | 
|  | 2980 | i915_gem_clflush_object(obj); | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 2981 |  | 
|  | 2982 | obj->read_domains |= I915_GEM_DOMAIN_CPU; | 
|  | 2983 | } | 
|  | 2984 |  | 
|  | 2985 | /* It should now be out of any other write domains, and we can update | 
|  | 2986 | * the domain values for our changes. | 
|  | 2987 | */ | 
|  | 2988 | BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0); | 
|  | 2989 |  | 
|  | 2990 | /* If we're writing through the CPU, then the GPU read domains will | 
|  | 2991 | * need to be invalidated at next use. | 
|  | 2992 | */ | 
|  | 2993 | if (write) { | 
|  | 2994 | obj->read_domains &= I915_GEM_DOMAIN_CPU; | 
|  | 2995 | obj->write_domain = I915_GEM_DOMAIN_CPU; | 
|  | 2996 | } | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 2997 |  | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 2998 | trace_i915_gem_object_change_domain(obj, | 
|  | 2999 | old_read_domains, | 
|  | 3000 | old_write_domain); | 
|  | 3001 |  | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 3002 | return 0; | 
|  | 3003 | } | 
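|  |  |  | 
|  |  | /* | 
|  |  | * Example (illustrative): before dereferencing the backing pages for a | 
|  |  | * full-object CPU read, a caller would do | 
|  |  | * | 
|  |  | *	ret = i915_gem_object_set_to_cpu_domain(obj, 0); | 
|  |  | * | 
|  |  | * after which obj_priv->pages stay coherent from the CPU's point of | 
|  |  | * view until the GPU next writes to the object. | 
|  |  | */ | 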
|  | 3004 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3005 | /* | 
|  | 3006 | * Set the next domain for the specified object. This | 
|  | 3007 | * may not actually perform the necessary flushing/invalidating though, | 
|  | 3008 | * as that may want to be batched with other set_domain operations | 
|  | 3009 | * | 
|  | 3010 | * This is (we hope) the only really tricky part of gem. The goal | 
|  | 3011 | * is fairly simple -- track which caches hold bits of the object | 
|  | 3012 | * and make sure they remain coherent. A few concrete examples may | 
|  | 3013 | * help to explain how it works. For shorthand, we use the notation | 
|  | 3014 | * (read_domains, write_domain), e.g. (CPU, CPU) to indicate | 
|  | 3015 | * a pair of read and write domain masks. | 
|  | 3016 | * | 
|  | 3017 | * Case 1: the batch buffer | 
|  | 3018 | * | 
|  | 3019 | *	1. Allocated | 
|  | 3020 | *	2. Written by CPU | 
|  | 3021 | *	3. Mapped to GTT | 
|  | 3022 | *	4. Read by GPU | 
|  | 3023 | *	5. Unmapped from GTT | 
|  | 3024 | *	6. Freed | 
|  | 3025 | * | 
|  | 3026 | *	Let's take these a step at a time | 
|  | 3027 | * | 
|  | 3028 | *	1. Allocated | 
|  | 3029 | *		Pages allocated from the kernel may still have | 
|  | 3030 | *		cache contents, so we set them to (CPU, CPU) always. | 
|  | 3031 | *	2. Written by CPU (using pwrite) | 
|  | 3032 | *		The pwrite function calls set_domain (CPU, CPU) and | 
|  | 3033 | *		this function does nothing (as nothing changes) | 
|  | 3034 | *	3. Mapped to GTT | 
|  | 3035 | *		This function asserts that the object is not | 
|  | 3036 | *		currently in any GPU-based read or write domains | 
|  | 3037 | *	4. Read by GPU | 
|  | 3038 | *		i915_gem_execbuffer calls set_domain (COMMAND, 0). | 
|  | 3039 | *		As write_domain is zero, this function adds in the | 
|  | 3040 | *		current read domains (CPU+COMMAND, 0). | 
|  | 3041 | *		flush_domains is set to CPU. | 
|  | 3042 | *		invalidate_domains is set to COMMAND | 
|  | 3043 | *		clflush is run to get data out of the CPU caches | 
|  | 3044 | *		then i915_dev_set_domain calls i915_gem_flush to | 
|  | 3045 | *		emit an MI_FLUSH and drm_agp_chipset_flush | 
|  | 3046 | *	5. Unmapped from GTT | 
|  | 3047 | *		i915_gem_object_unbind calls set_domain (CPU, CPU) | 
|  | 3048 | *		flush_domains and invalidate_domains end up both zero | 
|  | 3049 | *		so no flushing/invalidating happens | 
|  | 3050 | *	6. Freed | 
|  | 3051 | *		yay, done | 
|  | 3052 | * | 
|  | 3053 | * Case 2: The shared render buffer | 
|  | 3054 | * | 
|  | 3055 | *	1. Allocated | 
|  | 3056 | *	2. Mapped to GTT | 
|  | 3057 | *	3. Read/written by GPU | 
|  | 3058 | *	4. set_domain to (CPU,CPU) | 
|  | 3059 | *	5. Read/written by CPU | 
|  | 3060 | *	6. Read/written by GPU | 
|  | 3061 | * | 
|  | 3062 | *	1. Allocated | 
|  | 3063 | *		Same as last example, (CPU, CPU) | 
|  | 3064 | *	2. Mapped to GTT | 
|  | 3065 | *		Nothing changes (assertions find that it is not in the GPU) | 
|  | 3066 | *	3. Read/written by GPU | 
|  | 3067 | *		execbuffer calls set_domain (RENDER, RENDER) | 
|  | 3068 | *		flush_domains gets CPU | 
|  | 3069 | *		invalidate_domains gets GPU | 
|  | 3070 | *		clflush (obj) | 
|  | 3071 | *		MI_FLUSH and drm_agp_chipset_flush | 
|  | 3072 | *	4. set_domain (CPU, CPU) | 
|  | 3073 | *		flush_domains gets GPU | 
|  | 3074 | *		invalidate_domains gets CPU | 
|  | 3075 | *		wait_rendering (obj) to make sure all drawing is complete. | 
|  | 3076 | *		This will include an MI_FLUSH to get the data from GPU | 
|  | 3077 | *		to memory | 
|  | 3078 | *		clflush (obj) to invalidate the CPU cache | 
|  | 3079 | *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?) | 
|  | 3080 | *	5. Read/written by CPU | 
|  | 3081 | *		cache lines are loaded and dirtied | 
|  | 3082 | *	6. Read/written by GPU | 
|  | 3083 | *		Same as last GPU access | 
|  | 3084 | * | 
|  | 3085 | * Case 3: The constant buffer | 
|  | 3086 | * | 
|  | 3087 | *	1. Allocated | 
|  | 3088 | *	2. Written by CPU | 
|  | 3089 | *	3. Read by GPU | 
|  | 3090 | *	4. Updated (written) by CPU again | 
|  | 3091 | *	5. Read by GPU | 
|  | 3092 | * | 
|  | 3093 | *	1. Allocated | 
|  | 3094 | *		(CPU, CPU) | 
|  | 3095 | *	2. Written by CPU | 
|  | 3096 | *		(CPU, CPU) | 
|  | 3097 | *	3. Read by GPU | 
|  | 3098 | *		(CPU+RENDER, 0) | 
|  | 3099 | *		flush_domains = CPU | 
|  | 3100 | *		invalidate_domains = RENDER | 
|  | 3101 | *		clflush (obj) | 
|  | 3102 | *		MI_FLUSH | 
|  | 3103 | *		drm_agp_chipset_flush | 
|  | 3104 | *	4. Updated (written) by CPU again | 
|  | 3105 | *		(CPU, CPU) | 
|  | 3106 | *		flush_domains = 0 (no previous write domain) | 
|  | 3107 | *		invalidate_domains = 0 (no new read domains) | 
|  | 3108 | *	5. Read by GPU | 
|  | 3109 | *		(CPU+RENDER, 0) | 
|  | 3110 | *		flush_domains = CPU | 
|  | 3111 | *		invalidate_domains = RENDER | 
|  | 3112 | *		clflush (obj) | 
|  | 3113 | *		MI_FLUSH | 
|  | 3114 | *		drm_agp_chipset_flush | 
|  | 3115 | */ | 
| Keith Packard | c0d9082 | 2008-11-20 23:11:08 -0800 | [diff] [blame] | 3116 | static void | 
| Eric Anholt | 8b0e378 | 2009-02-19 14:40:50 -0800 | [diff] [blame] | 3117 | i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3118 | { | 
|  | 3119 | struct drm_device		*dev = obj->dev; | 
| Chris Wilson | 88f356b | 2010-08-04 13:55:32 +0100 | [diff] [blame] | 3120 | drm_i915_private_t		*dev_priv = dev->dev_private; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 3121 | struct drm_i915_gem_object	*obj_priv = to_intel_bo(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3122 | uint32_t			invalidate_domains = 0; | 
|  | 3123 | uint32_t			flush_domains = 0; | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 3124 | uint32_t			old_read_domains; | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3125 |  | 
| Eric Anholt | 8b0e378 | 2009-02-19 14:40:50 -0800 | [diff] [blame] | 3126 | BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU); | 
|  | 3127 | BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3128 |  | 
| Jesse Barnes | 652c393 | 2009-08-17 13:31:43 -0700 | [diff] [blame] | 3129 | intel_mark_busy(dev, obj); | 
|  | 3130 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3131 | #if WATCH_BUF | 
|  | 3132 | DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n", | 
|  | 3133 | __func__, obj, | 
| Eric Anholt | 8b0e378 | 2009-02-19 14:40:50 -0800 | [diff] [blame] | 3134 | obj->read_domains, obj->pending_read_domains, | 
|  | 3135 | obj->write_domain, obj->pending_write_domain); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3136 | #endif | 
|  | 3137 | /* | 
|  | 3138 | * If the object isn't moving to a new write domain, | 
|  | 3139 | * let the object stay in multiple read domains | 
|  | 3140 | */ | 
| Eric Anholt | 8b0e378 | 2009-02-19 14:40:50 -0800 | [diff] [blame] | 3141 | if (obj->pending_write_domain == 0) | 
|  | 3142 | obj->pending_read_domains |= obj->read_domains; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3143 | else | 
|  | 3144 | obj_priv->dirty = 1; | 
|  | 3145 |  | 
|  | 3146 | /* | 
|  | 3147 | * Flush the current write domain if | 
|  | 3148 | * the new read domains don't match. Invalidate | 
|  | 3149 | * any read domains which differ from the old | 
|  | 3150 | * write domain | 
|  | 3151 | */ | 
| Eric Anholt | 8b0e378 | 2009-02-19 14:40:50 -0800 | [diff] [blame] | 3152 | if (obj->write_domain && | 
|  | 3153 | obj->write_domain != obj->pending_read_domains) { | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3154 | flush_domains |= obj->write_domain; | 
| Eric Anholt | 8b0e378 | 2009-02-19 14:40:50 -0800 | [diff] [blame] | 3155 | invalidate_domains |= | 
|  | 3156 | obj->pending_read_domains & ~obj->write_domain; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3157 | } | 
|  | 3158 | /* | 
|  | 3159 | * Invalidate any read caches which may have | 
|  | 3160 | * stale data. That is, any new read domains. | 
|  | 3161 | */ | 
| Eric Anholt | 8b0e378 | 2009-02-19 14:40:50 -0800 | [diff] [blame] | 3162 | invalidate_domains |= obj->pending_read_domains & ~obj->read_domains; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3163 | if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) { | 
|  | 3164 | #if WATCH_BUF | 
|  | 3165 | DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n", | 
|  | 3166 | __func__, flush_domains, invalidate_domains); | 
|  | 3167 | #endif | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3168 | i915_gem_clflush_object(obj); | 
|  | 3169 | } | 
|  | 3170 |  | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 3171 | old_read_domains = obj->read_domains; | 
|  | 3172 |  | 
| Eric Anholt | efbeed9 | 2009-02-19 14:54:51 -0800 | [diff] [blame] | 3173 | /* The actual obj->write_domain will be updated with | 
|  | 3174 | * pending_write_domain after we emit the accumulated flush for all | 
|  | 3175 | * of our domain changes in execbuffers (which clears objects' | 
|  | 3176 | * write_domains).  So if we have a current write domain that we | 
|  | 3177 | * aren't changing, set pending_write_domain to that. | 
|  | 3178 | */ | 
|  | 3179 | if (flush_domains == 0 && obj->pending_write_domain == 0) | 
|  | 3180 | obj->pending_write_domain = obj->write_domain; | 
| Eric Anholt | 8b0e378 | 2009-02-19 14:40:50 -0800 | [diff] [blame] | 3181 | obj->read_domains = obj->pending_read_domains; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3182 |  | 
| Chris Wilson | 88f356b | 2010-08-04 13:55:32 +0100 | [diff] [blame] | 3183 | if (flush_domains & I915_GEM_GPU_DOMAINS) { | 
|  | 3184 | if (obj_priv->ring == &dev_priv->render_ring) | 
|  | 3185 | dev_priv->flush_rings |= FLUSH_RENDER_RING; | 
|  | 3186 | else if (obj_priv->ring == &dev_priv->bsd_ring) | 
|  | 3187 | dev_priv->flush_rings |= FLUSH_BSD_RING; | 
|  | 3188 | } | 
|  | 3189 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3190 | dev->invalidate_domains |= invalidate_domains; | 
|  | 3191 | dev->flush_domains |= flush_domains; | 
|  | 3192 | #if WATCH_BUF | 
|  | 3193 | DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n", | 
|  | 3194 | __func__, | 
|  | 3195 | obj->read_domains, obj->write_domain, | 
|  | 3196 | dev->invalidate_domains, dev->flush_domains); | 
|  | 3197 | #endif | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 3198 |  | 
|  | 3199 | trace_i915_gem_object_change_domain(obj, | 
|  | 3200 | old_read_domains, | 
|  | 3201 | obj->write_domain); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3202 | } | 
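|  |  |  | 
|  |  | /* | 
|  |  | * Worked example for the bookkeeping above (Case 3 of the big | 
|  |  | * comment): an object in (CPU, CPU) submitted with pending domains | 
|  |  | * (RENDER, 0) gives | 
|  |  | * | 
|  |  | *	flush_domains      = CPU    (old write domain differs from | 
|  |  | *	                             the new read-domain mask) | 
|  |  | *	invalidate_domains = RENDER (new read domain not yet valid) | 
|  |  | * | 
|  |  | * leaving the object in (CPU+RENDER, 0): it is clflushed here, while | 
|  |  | * the accumulated RENDER invalidate is emitted later by the | 
|  |  | * execbuffer flush. | 
|  |  | */ | 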
|  | 3203 |  | 
|  | 3204 | /** | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3205 | * Moves the object from a partially valid CPU read domain to a fully valid one. | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3206 | * | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3207 | * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(), | 
|  | 3208 | * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU). | 
|  | 3209 | */ | 
|  | 3210 | static void | 
|  | 3211 | i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) | 
|  | 3212 | { | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 3213 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3214 |  | 
|  | 3215 | if (!obj_priv->page_cpu_valid) | 
|  | 3216 | return; | 
|  | 3217 |  | 
|  | 3218 | /* If we're partially in the CPU read domain, finish moving it in. | 
|  | 3219 | */ | 
|  | 3220 | if (obj->read_domains & I915_GEM_DOMAIN_CPU) { | 
|  | 3221 | int i; | 
|  | 3222 |  | 
|  | 3223 | for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) { | 
|  | 3224 | if (obj_priv->page_cpu_valid[i]) | 
|  | 3225 | continue; | 
| Eric Anholt | 856fa19 | 2009-03-19 14:10:50 -0700 | [diff] [blame] | 3226 | drm_clflush_pages(obj_priv->pages + i, 1); | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3227 | } | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3228 | } | 
|  | 3229 |  | 
|  | 3230 | /* Free the page_cpu_valid mappings which are now stale, whether | 
|  | 3231 | * or not we've got I915_GEM_DOMAIN_CPU. | 
|  | 3232 | */ | 
| Eric Anholt | 9a298b2 | 2009-03-24 12:23:04 -0700 | [diff] [blame] | 3233 | kfree(obj_priv->page_cpu_valid); | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3234 | obj_priv->page_cpu_valid = NULL; | 
|  | 3235 | } | 
|  | 3236 |  | 
|  | 3237 | /** | 
|  | 3238 | * Set the CPU read domain on a range of the object. | 
|  | 3239 | * | 
|  | 3240 | * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's | 
|  | 3241 | * not entirely valid.  The page_cpu_valid member of the object flags which | 
|  | 3242 | * pages have been flushed, and will be respected by | 
|  | 3243 | * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping | 
|  | 3244 | * of the whole object. | 
|  | 3245 | * | 
|  | 3246 | * This function returns when the move is complete, including waiting on | 
|  | 3247 | * flushes to occur. | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3248 | */ | 
|  | 3249 | static int | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3250 | i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, | 
|  | 3251 | uint64_t offset, uint64_t size) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3252 | { | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 3253 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 3254 | uint32_t old_read_domains; | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3255 | int i, ret; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3256 |  | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3257 | if (offset == 0 && size == obj->size) | 
|  | 3258 | return i915_gem_object_set_to_cpu_domain(obj, 0); | 
|  | 3259 |  | 
| Chris Wilson | 2dafb1e | 2010-06-07 14:03:05 +0100 | [diff] [blame] | 3260 | ret = i915_gem_object_flush_gpu_write_domain(obj); | 
|  | 3261 | if (ret) | 
|  | 3262 | return ret; | 
|  | 3263 |  | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3264 | /* Wait on any GPU rendering and flushing to occur. */ | 
|  | 3265 | ret = i915_gem_object_wait_rendering(obj); | 
|  | 3266 | if (ret != 0) | 
|  | 3267 | return ret; | 
|  | 3268 | i915_gem_object_flush_gtt_write_domain(obj); | 
|  | 3269 |  | 
|  | 3270 | /* If we're already fully in the CPU read domain, we're done. */ | 
|  | 3271 | if (obj_priv->page_cpu_valid == NULL && | 
|  | 3272 | (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3273 | return 0; | 
|  | 3274 |  | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3275 | /* Otherwise, create/clear the per-page CPU read domain flag if we're | 
|  | 3276 | * newly adding I915_GEM_DOMAIN_CPU | 
|  | 3277 | */ | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3278 | if (obj_priv->page_cpu_valid == NULL) { | 
| Eric Anholt | 9a298b2 | 2009-03-24 12:23:04 -0700 | [diff] [blame] | 3279 | obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE, | 
|  | 3280 | GFP_KERNEL); | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3281 | if (obj_priv->page_cpu_valid == NULL) | 
|  | 3282 | return -ENOMEM; | 
|  | 3283 | } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) | 
|  | 3284 | memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3285 |  | 
|  | 3286 | /* Flush the cache on any pages that are still invalid from the CPU's | 
|  | 3287 | * perspective. | 
|  | 3288 | */ | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3289 | for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; | 
|  | 3290 | i++) { | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3291 | if (obj_priv->page_cpu_valid[i]) | 
|  | 3292 | continue; | 
|  | 3293 |  | 
| Eric Anholt | 856fa19 | 2009-03-19 14:10:50 -0700 | [diff] [blame] | 3294 | drm_clflush_pages(obj_priv->pages + i, 1); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3295 |  | 
|  | 3296 | obj_priv->page_cpu_valid[i] = 1; | 
|  | 3297 | } | 
|  | 3298 |  | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3299 | /* It should now be out of any other write domains, and we can update | 
|  | 3300 | * the domain values for our changes. | 
|  | 3301 | */ | 
|  | 3302 | BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0); | 
|  | 3303 |  | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 3304 | old_read_domains = obj->read_domains; | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3305 | obj->read_domains |= I915_GEM_DOMAIN_CPU; | 
|  | 3306 |  | 
| Chris Wilson | 1c5d22f | 2009-08-25 11:15:50 +0100 | [diff] [blame] | 3307 | trace_i915_gem_object_change_domain(obj, | 
|  | 3308 | old_read_domains, | 
|  | 3309 | obj->write_domain); | 
|  | 3310 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3311 | return 0; | 
|  | 3312 | } | 
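|  |  |  | 
|  |  | /* | 
|  |  | * Example (illustrative): a 4-byte read at offset 0x1004 of a bound, | 
|  |  | * GPU-written object only needs page 1, so | 
|  |  | * | 
|  |  | *	i915_gem_object_set_cpu_read_domain_range(obj, 0x1004, 4); | 
|  |  | * | 
|  |  | * clflushes that single page, marks page_cpu_valid[1] and leaves the | 
|  |  | * rest of the object alone, avoiding a full-object flush. | 
|  |  | */ | 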
|  | 3313 |  | 
|  | 3314 | /** | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3315 | * Pin an object to the GTT and evaluate the relocations landing in it. | 
|  | 3316 | */ | 
|  | 3317 | static int | 
|  | 3318 | i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | 
|  | 3319 | struct drm_file *file_priv, | 
| Jesse Barnes | 76446ca | 2009-12-17 22:05:42 -0500 | [diff] [blame] | 3320 | struct drm_i915_gem_exec_object2 *entry, | 
| Eric Anholt | 40a5f0d | 2009-03-12 11:23:52 -0700 | [diff] [blame] | 3321 | struct drm_i915_gem_relocation_entry *relocs) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3322 | { | 
|  | 3323 | struct drm_device *dev = obj->dev; | 
| Keith Packard | 0839ccb | 2008-10-30 19:38:48 -0700 | [diff] [blame] | 3324 | drm_i915_private_t *dev_priv = dev->dev_private; | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 3325 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3326 | int i, ret; | 
| Keith Packard | 0839ccb | 2008-10-30 19:38:48 -0700 | [diff] [blame] | 3327 | void __iomem *reloc_page; | 
| Jesse Barnes | 76446ca | 2009-12-17 22:05:42 -0500 | [diff] [blame] | 3328 | bool need_fence; | 
|  | 3329 |  | 
|  | 3330 | need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE && | 
|  | 3331 | obj_priv->tiling_mode != I915_TILING_NONE; | 
|  | 3332 |  | 
|  | 3333 | /* Check fence reg constraints and rebind if necessary */ | 
| Chris Wilson | 808b24d | 2010-05-27 13:18:15 +0100 | [diff] [blame] | 3334 | if (need_fence && | 
|  | 3335 | !i915_gem_object_fence_offset_ok(obj, | 
|  | 3336 | obj_priv->tiling_mode)) { | 
|  | 3337 | ret = i915_gem_object_unbind(obj); | 
|  | 3338 | if (ret) | 
|  | 3339 | return ret; | 
|  | 3340 | } | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3341 |  | 
|  | 3342 | /* Choose the GTT offset for our buffer and put it there. */ | 
|  | 3343 | ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment); | 
|  | 3344 | if (ret) | 
|  | 3345 | return ret; | 
|  | 3346 |  | 
| Jesse Barnes | 76446ca | 2009-12-17 22:05:42 -0500 | [diff] [blame] | 3347 | /* | 
|  | 3348 | * Pre-965 chips need a fence register set up in order to | 
|  | 3349 | * properly handle blits to/from tiled surfaces. | 
|  | 3350 | */ | 
|  | 3351 | if (need_fence) { | 
|  | 3352 | ret = i915_gem_object_get_fence_reg(obj); | 
|  | 3353 | if (ret != 0) { | 
| Jesse Barnes | 76446ca | 2009-12-17 22:05:42 -0500 | [diff] [blame] | 3354 | i915_gem_object_unpin(obj); | 
|  | 3355 | return ret; | 
|  | 3356 | } | 
|  | 3357 | } | 
|  | 3358 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3359 | entry->offset = obj_priv->gtt_offset; | 
|  | 3360 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3361 | /* Apply the relocations, using the GTT aperture to avoid cache | 
|  | 3362 | * flushing requirements. | 
|  | 3363 | */ | 
|  | 3364 | for (i = 0; i < entry->relocation_count; i++) { | 
| Eric Anholt | 40a5f0d | 2009-03-12 11:23:52 -0700 | [diff] [blame] | 3365 | struct drm_i915_gem_relocation_entry *reloc = &relocs[i]; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3366 | struct drm_gem_object *target_obj; | 
|  | 3367 | struct drm_i915_gem_object *target_obj_priv; | 
| Eric Anholt | 3043c60 | 2008-10-02 12:24:47 -0700 | [diff] [blame] | 3368 | uint32_t reloc_val, reloc_offset; | 
|  | 3369 | uint32_t __iomem *reloc_entry; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3370 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3371 | target_obj = drm_gem_object_lookup(obj->dev, file_priv, | 
| Eric Anholt | 40a5f0d | 2009-03-12 11:23:52 -0700 | [diff] [blame] | 3372 | reloc->target_handle); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3373 | if (target_obj == NULL) { | 
|  | 3374 | i915_gem_object_unpin(obj); | 
|  | 3375 | return -EBADF; | 
|  | 3376 | } | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 3377 | target_obj_priv = to_intel_bo(target_obj); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3378 |  | 
| Chris Wilson | 8542a0b | 2009-09-09 21:15:15 +0100 | [diff] [blame] | 3379 | #if WATCH_RELOC | 
|  | 3380 | DRM_INFO("%s: obj %p offset %08x target %d " | 
|  | 3381 | "read %08x write %08x gtt %08x " | 
|  | 3382 | "presumed %08x delta %08x\n", | 
|  | 3383 | __func__, | 
|  | 3384 | obj, | 
|  | 3385 | (int) reloc->offset, | 
|  | 3386 | (int) reloc->target_handle, | 
|  | 3387 | (int) reloc->read_domains, | 
|  | 3388 | (int) reloc->write_domain, | 
|  | 3389 | (int) target_obj_priv->gtt_offset, | 
|  | 3390 | (int) reloc->presumed_offset, | 
|  | 3391 | reloc->delta); | 
|  | 3392 | #endif | 
|  | 3393 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3394 | /* The target buffer should have appeared before us in the | 
|  | 3395 | * exec_object list, so it should have a GTT space bound by now. | 
|  | 3396 | */ | 
|  | 3397 | if (target_obj_priv->gtt_space == NULL) { | 
|  | 3398 | DRM_ERROR("No GTT space found for object %d\n", | 
| Eric Anholt | 40a5f0d | 2009-03-12 11:23:52 -0700 | [diff] [blame] | 3399 | reloc->target_handle); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3400 | drm_gem_object_unreference(target_obj); | 
|  | 3401 | i915_gem_object_unpin(obj); | 
|  | 3402 | return -EINVAL; | 
|  | 3403 | } | 
|  | 3404 |  | 
| Chris Wilson | 8542a0b | 2009-09-09 21:15:15 +0100 | [diff] [blame] | 3405 | /* Validate that the target is in a valid r/w GPU domain */ | 
| Daniel Vetter | 16edd55 | 2010-02-19 11:52:02 +0100 | [diff] [blame] | 3406 | if (reloc->write_domain & (reloc->write_domain - 1)) { | 
|  | 3407 | DRM_ERROR("reloc with multiple write domains: " | 
|  | 3408 | "obj %p target %d offset %d " | 
|  | 3409 | "read %08x write %08x", | 
|  | 3410 | obj, reloc->target_handle, | 
|  | 3411 | (int) reloc->offset, | 
|  | 3412 | reloc->read_domains, | 
|  | 3413 | reloc->write_domain); | 
|  |  | drm_gem_object_unreference(target_obj); | 
|  |  | i915_gem_object_unpin(obj); | 
|  | 3414 | return -EINVAL; | 
|  | 3415 | } | 
| Chris Wilson | 8542a0b | 2009-09-09 21:15:15 +0100 | [diff] [blame] | 3416 | if (reloc->write_domain & I915_GEM_DOMAIN_CPU || | 
|  | 3417 | reloc->read_domains & I915_GEM_DOMAIN_CPU) { | 
|  | 3418 | DRM_ERROR("reloc with read/write CPU domains: " | 
|  | 3419 | "obj %p target %d offset %d " | 
|  | 3420 | "read %08x write %08x", | 
|  | 3421 | obj, reloc->target_handle, | 
|  | 3422 | (int) reloc->offset, | 
|  | 3423 | reloc->read_domains, | 
|  | 3424 | reloc->write_domain); | 
|  | 3425 | drm_gem_object_unreference(target_obj); | 
|  | 3426 | i915_gem_object_unpin(obj); | 
|  | 3427 | return -EINVAL; | 
|  | 3428 | } | 
|  | 3429 | if (reloc->write_domain && target_obj->pending_write_domain && | 
|  | 3430 | reloc->write_domain != target_obj->pending_write_domain) { | 
|  | 3431 | DRM_ERROR("Write domain conflict: " | 
|  | 3432 | "obj %p target %d offset %d " | 
|  | 3433 | "new %08x old %08x\n", | 
|  | 3434 | obj, reloc->target_handle, | 
|  | 3435 | (int) reloc->offset, | 
|  | 3436 | reloc->write_domain, | 
|  | 3437 | target_obj->pending_write_domain); | 
|  | 3438 | drm_gem_object_unreference(target_obj); | 
|  | 3439 | i915_gem_object_unpin(obj); | 
|  | 3440 | return -EINVAL; | 
|  | 3441 | } | 
|  | 3442 |  | 
|  | 3443 | target_obj->pending_read_domains |= reloc->read_domains; | 
|  | 3444 | target_obj->pending_write_domain |= reloc->write_domain; | 
|  | 3445 |  | 
|  | 3446 | /* If the relocation already has the right value in it, no | 
|  | 3447 | * more work needs to be done. | 
|  | 3448 | */ | 
|  | 3449 | if (target_obj_priv->gtt_offset == reloc->presumed_offset) { | 
|  | 3450 | drm_gem_object_unreference(target_obj); | 
|  | 3451 | continue; | 
|  | 3452 | } | 
|  | 3453 |  | 
|  | 3454 | /* Check that the relocation address is valid... */ | 
| Eric Anholt | 40a5f0d | 2009-03-12 11:23:52 -0700 | [diff] [blame] | 3455 | if (reloc->offset > obj->size - 4) { | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3456 | DRM_ERROR("Relocation beyond object bounds: " | 
|  | 3457 | "obj %p target %d offset %d size %d.\n", | 
| Eric Anholt | 40a5f0d | 2009-03-12 11:23:52 -0700 | [diff] [blame] | 3458 | obj, reloc->target_handle, | 
|  | 3459 | (int) reloc->offset, (int) obj->size); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3460 | drm_gem_object_unreference(target_obj); | 
|  | 3461 | i915_gem_object_unpin(obj); | 
|  | 3462 | return -EINVAL; | 
|  | 3463 | } | 
| Eric Anholt | 40a5f0d | 2009-03-12 11:23:52 -0700 | [diff] [blame] | 3464 | if (reloc->offset & 3) { | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3465 | DRM_ERROR("Relocation not 4-byte aligned: " | 
|  | 3466 | "obj %p target %d offset %d.\n", | 
| Eric Anholt | 40a5f0d | 2009-03-12 11:23:52 -0700 | [diff] [blame] | 3467 | obj, reloc->target_handle, | 
|  | 3468 | (int) reloc->offset); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3469 | drm_gem_object_unreference(target_obj); | 
|  | 3470 | i915_gem_object_unpin(obj); | 
|  | 3471 | return -EINVAL; | 
|  | 3472 | } | 
|  | 3473 |  | 
| Chris Wilson | 8542a0b | 2009-09-09 21:15:15 +0100 | [diff] [blame] | 3474 | /* and points to somewhere within the target object. */ | 
| Chris Wilson | cd0b9fb | 2009-09-15 23:23:18 +0100 | [diff] [blame] | 3475 | if (reloc->delta >= target_obj->size) { | 
|  | 3476 | DRM_ERROR("Relocation beyond target object bounds: " | 
|  | 3477 | "obj %p target %d delta %d size %d.\n", | 
| Eric Anholt | 40a5f0d | 2009-03-12 11:23:52 -0700 | [diff] [blame] | 3478 | obj, reloc->target_handle, | 
| Chris Wilson | cd0b9fb | 2009-09-15 23:23:18 +0100 | [diff] [blame] | 3479 | (int) reloc->delta, (int) target_obj->size); | 
| Chris Wilson | 491152b | 2009-02-11 14:26:32 +0000 | [diff] [blame] | 3480 | drm_gem_object_unreference(target_obj); | 
|  | 3481 | i915_gem_object_unpin(obj); | 
| Eric Anholt | e47c68e | 2008-11-14 13:35:19 -0800 | [diff] [blame] | 3482 | return -EINVAL; | 
|  | 3483 | } | 
|  | 3484 |  | 
| Eric Anholt | 2ef7eea | 2008-11-10 10:53:25 -0800 | [diff] [blame] | 3485 | ret = i915_gem_object_set_to_gtt_domain(obj, 1); | 
|  | 3486 | if (ret != 0) { | 
|  | 3487 | drm_gem_object_unreference(target_obj); | 
|  | 3488 | i915_gem_object_unpin(obj); | 
|  | 3489 | return ret; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3490 | } | 
|  | 3491 |  | 
|  | 3492 | /* Map the page containing the relocation we're going to | 
|  | 3493 | * perform. | 
|  | 3494 | */ | 
| Eric Anholt | 40a5f0d | 2009-03-12 11:23:52 -0700 | [diff] [blame] | 3495 | reloc_offset = obj_priv->gtt_offset + reloc->offset; | 
| Keith Packard | 0839ccb | 2008-10-30 19:38:48 -0700 | [diff] [blame] | 3496 | reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, | 
|  | 3497 | (reloc_offset & | 
| Chris Wilson | fca3ec0 | 2010-08-04 14:34:24 +0100 | [diff] [blame] | 3498 | ~(PAGE_SIZE - 1)), | 
|  | 3499 | KM_USER0); | 
| Eric Anholt | 3043c60 | 2008-10-02 12:24:47 -0700 | [diff] [blame] | 3500 | reloc_entry = (uint32_t __iomem *)(reloc_page + | 
| Keith Packard | 0839ccb | 2008-10-30 19:38:48 -0700 | [diff] [blame] | 3501 | (reloc_offset & (PAGE_SIZE - 1))); | 
| Eric Anholt | 40a5f0d | 2009-03-12 11:23:52 -0700 | [diff] [blame] | 3502 | reloc_val = target_obj_priv->gtt_offset + reloc->delta; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3503 |  | 
|  | 3504 | #if WATCH_BUF | 
|  | 3505 | DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n", | 
| Eric Anholt | 40a5f0d | 2009-03-12 11:23:52 -0700 | [diff] [blame] | 3506 | obj, (unsigned int) reloc->offset, | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3507 | readl(reloc_entry), reloc_val); | 
|  | 3508 | #endif | 
|  | 3509 | writel(reloc_val, reloc_entry); | 
| Chris Wilson | fca3ec0 | 2010-08-04 14:34:24 +0100 | [diff] [blame] | 3510 | io_mapping_unmap_atomic(reloc_page, KM_USER0); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3511 |  | 
| Eric Anholt | 40a5f0d | 2009-03-12 11:23:52 -0700 | [diff] [blame] | 3512 | /* The updated presumed offset for this entry will be | 
|  | 3513 | * copied back out to the user. | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3514 | */ | 
| Eric Anholt | 40a5f0d | 2009-03-12 11:23:52 -0700 | [diff] [blame] | 3515 | reloc->presumed_offset = target_obj_priv->gtt_offset; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3516 |  | 
|  | 3517 | drm_gem_object_unreference(target_obj); | 
|  | 3518 | } | 
|  | 3519 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3520 | #if WATCH_BUF | 
|  | 3521 | if (0) | 
|  | 3522 | i915_gem_dump_object(obj, 128, __func__, ~0); | 
|  | 3523 | #endif | 
|  | 3524 | return 0; | 
|  | 3525 | } | 
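|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative arithmetic for the relocation loop above: if the | 
|  |  | * target lands at GTT offset 0x00100000 and the entry has delta 0x40 | 
|  |  | * and offset 0x80, then | 
|  |  | * | 
|  |  | *	reloc_val = 0x00100000 + 0x40 = 0x00100040 | 
|  |  | * | 
|  |  | * is written at byte 0x80 of this object through the aperture, and | 
|  |  | * presumed_offset is updated so userspace can skip the rewrite while | 
|  |  | * the target stays put.  The (w & (w - 1)) test rejects multi-bit | 
|  |  | * write masks, e.g. 0x6 & 0x5 = 0x4 != 0. | 
|  |  | */ | 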
|  | 3526 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3527 | /* Throttle our rendering by waiting until the ring has completed our requests | 
|  | 3528 | * emitted over 20 msec ago. | 
|  | 3529 | * | 
| Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 3530 | * Note that if we were to use the current jiffies each time around the loop, | 
|  | 3531 | * we wouldn't escape the function with any frames outstanding if the time to | 
|  | 3532 | * render a frame was over 20ms. | 
|  | 3533 | * | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3534 | * This should get us reasonable parallelism between CPU and GPU but also | 
|  | 3535 | * relatively low latency when blocking on a particular request to finish. | 
|  | 3536 | */ | 
|  | 3537 | static int | 
|  | 3538 | i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv) | 
|  | 3539 | { | 
|  | 3540 | struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; | 
|  | 3541 | int ret = 0; | 
| Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 3542 | unsigned long recent_enough = jiffies - msecs_to_jiffies(20); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3543 |  | 
|  | 3544 | mutex_lock(&dev->struct_mutex); | 
| Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 3545 | while (!list_empty(&i915_file_priv->mm.request_list)) { | 
|  | 3546 | struct drm_i915_gem_request *request; | 
|  | 3547 |  | 
|  | 3548 | request = list_first_entry(&i915_file_priv->mm.request_list, | 
|  | 3549 | struct drm_i915_gem_request, | 
|  | 3550 | client_list); | 
|  | 3551 |  | 
|  | 3552 | if (time_after_eq(request->emitted_jiffies, recent_enough)) | 
|  | 3553 | break; | 
|  | 3554 |  | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 3555 | ret = i915_wait_request(dev, request->seqno, request->ring); | 
| Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 3556 | if (ret != 0) | 
|  | 3557 | break; | 
|  | 3558 | } | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3559 | mutex_unlock(&dev->struct_mutex); | 
| Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 3560 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3561 | return ret; | 
|  | 3562 | } | 
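|  |  |  | 
|  |  | /* | 
|  |  | * Example: with HZ == 1000, recent_enough is jiffies - 20, so a | 
|  |  | * request emitted 25 ticks ago is waited upon while one emitted 10 | 
|  |  | * ticks ago ends the loop.  Sampling jiffies once, before the loop, | 
|  |  | * is what guarantees we escape even when a frame takes longer than | 
|  |  | * 20ms. | 
|  |  | */ | 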
|  | 3563 |  | 
| Eric Anholt | 40a5f0d | 2009-03-12 11:23:52 -0700 | [diff] [blame] | 3564 | static int | 
| Jesse Barnes | 76446ca | 2009-12-17 22:05:42 -0500 | [diff] [blame] | 3565 | i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list, | 
| Eric Anholt | 40a5f0d | 2009-03-12 11:23:52 -0700 | [diff] [blame] | 3566 | uint32_t buffer_count, | 
|  | 3567 | struct drm_i915_gem_relocation_entry **relocs) | 
|  | 3568 | { | 
|  | 3569 | uint32_t reloc_count = 0, reloc_index = 0, i; | 
|  | 3570 | int ret; | 
|  | 3571 |  | 
|  | 3572 | *relocs = NULL; | 
|  | 3573 | for (i = 0; i < buffer_count; i++) { | 
|  | 3574 | if (reloc_count + exec_list[i].relocation_count < reloc_count) | 
|  | 3575 | return -EINVAL; | 
|  | 3576 | reloc_count += exec_list[i].relocation_count; | 
|  | 3577 | } | 
|  | 3578 |  | 
| Jesse Barnes | 8e7d2b2 | 2009-05-08 16:13:25 -0700 | [diff] [blame] | 3579 | *relocs = drm_calloc_large(reloc_count, sizeof(**relocs)); | 
| Jesse Barnes | 76446ca | 2009-12-17 22:05:42 -0500 | [diff] [blame] | 3580 | if (*relocs == NULL) { | 
|  | 3581 | DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count); | 
| Eric Anholt | 40a5f0d | 2009-03-12 11:23:52 -0700 | [diff] [blame] | 3582 | return -ENOMEM; | 
| Jesse Barnes | 76446ca | 2009-12-17 22:05:42 -0500 | [diff] [blame] | 3583 | } | 
| Eric Anholt | 40a5f0d | 2009-03-12 11:23:52 -0700 | [diff] [blame] | 3584 |  | 
|  | 3585 | for (i = 0; i < buffer_count; i++) { | 
|  | 3586 | struct drm_i915_gem_relocation_entry __user *user_relocs; | 
|  | 3587 |  | 
|  | 3588 | user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr; | 
|  | 3589 |  | 
|  | 3590 | ret = copy_from_user(&(*relocs)[reloc_index], | 
|  | 3591 | user_relocs, | 
|  | 3592 | exec_list[i].relocation_count * | 
|  | 3593 | sizeof(**relocs)); | 
|  | 3594 | if (ret != 0) { | 
| Jesse Barnes | 8e7d2b2 | 2009-05-08 16:13:25 -0700 | [diff] [blame] | 3595 | drm_free_large(*relocs); | 
| Eric Anholt | 40a5f0d | 2009-03-12 11:23:52 -0700 | [diff] [blame] | 3596 | *relocs = NULL; | 
| Florian Mickler | 2bc43b5 | 2009-04-06 22:55:41 +0200 | [diff] [blame] | 3597 | return -EFAULT; | 
| Eric Anholt | 40a5f0d | 2009-03-12 11:23:52 -0700 | [diff] [blame] | 3598 | } | 
|  | 3599 |  | 
|  | 3600 | reloc_index += exec_list[i].relocation_count; | 
|  | 3601 | } | 
|  | 3602 |  | 
| Florian Mickler | 2bc43b5 | 2009-04-06 22:55:41 +0200 | [diff] [blame] | 3603 | return 0; | 
| Eric Anholt | 40a5f0d | 2009-03-12 11:23:52 -0700 | [diff] [blame] | 3604 | } | 
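|  |  |  | 
|  |  | /* | 
|  |  | * Note on the overflow guard above (illustrative): reloc_count is a | 
|  |  | * uint32_t, so a hostile relocation_count can wrap the sum, e.g. | 
|  |  | * | 
|  |  | *	reloc_count = 0xfffffff0, relocation_count = 0x20 | 
|  |  | *	=> 0xfffffff0 + 0x20 = 0x10 < reloc_count | 
|  |  | * | 
|  |  | * which the "< reloc_count" test catches before an under-sized | 
|  |  | * drm_calloc_large() buffer could be overrun. | 
|  |  | */ | 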
|  | 3605 |  | 
|  | 3606 | static int | 
| Jesse Barnes | 76446ca | 2009-12-17 22:05:42 -0500 | [diff] [blame] | 3607 | i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list, | 
| Eric Anholt | 40a5f0d | 2009-03-12 11:23:52 -0700 | [diff] [blame] | 3608 | uint32_t buffer_count, | 
|  | 3609 | struct drm_i915_gem_relocation_entry *relocs) | 
|  | 3610 | { | 
|  | 3611 | uint32_t reloc_count = 0, i; | 
| Florian Mickler | 2bc43b5 | 2009-04-06 22:55:41 +0200 | [diff] [blame] | 3612 | int ret = 0; | 
| Eric Anholt | 40a5f0d | 2009-03-12 11:23:52 -0700 | [diff] [blame] | 3613 |  | 
| Chris Wilson | 93533c2 | 2010-01-31 10:40:48 +0000 | [diff] [blame] | 3614 | if (relocs == NULL) | 
|  | 3615 | return 0; | 
|  | 3616 |  | 
| Eric Anholt | 40a5f0d | 2009-03-12 11:23:52 -0700 | [diff] [blame] | 3617 | for (i = 0; i < buffer_count; i++) { | 
|  | 3618 | struct drm_i915_gem_relocation_entry __user *user_relocs; | 
| Florian Mickler | 2bc43b5 | 2009-04-06 22:55:41 +0200 | [diff] [blame] | 3619 | int unwritten; | 
| Eric Anholt | 40a5f0d | 2009-03-12 11:23:52 -0700 | [diff] [blame] | 3620 |  | 
|  | 3621 | user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr; | 
|  | 3622 |  | 
| Florian Mickler | 2bc43b5 | 2009-04-06 22:55:41 +0200 | [diff] [blame] | 3623 | unwritten = copy_to_user(user_relocs, | 
|  | 3624 | &relocs[reloc_count], | 
|  | 3625 | exec_list[i].relocation_count * | 
|  | 3626 | sizeof(*relocs)); | 
|  | 3627 |  | 
|  | 3628 | if (unwritten) { | 
|  | 3629 | ret = -EFAULT; | 
|  | 3630 | goto err; | 
| Eric Anholt | 40a5f0d | 2009-03-12 11:23:52 -0700 | [diff] [blame] | 3631 | } | 
|  | 3632 |  | 
|  | 3633 | reloc_count += exec_list[i].relocation_count; | 
|  | 3634 | } | 
|  | 3635 |  | 
| Florian Mickler | 2bc43b5 | 2009-04-06 22:55:41 +0200 | [diff] [blame] | 3636 | err: | 
| Jesse Barnes | 8e7d2b2 | 2009-05-08 16:13:25 -0700 | [diff] [blame] | 3637 | drm_free_large(relocs); | 
| Eric Anholt | 40a5f0d | 2009-03-12 11:23:52 -0700 | [diff] [blame] | 3638 |  | 
|  | 3639 | return ret; | 
|  | 3640 | } | 
|  | 3641 |  | 
| Chris Wilson | 83d6079 | 2009-06-06 09:45:57 +0100 | [diff] [blame] | 3642 | static int | 
| Jesse Barnes | 76446ca | 2009-12-17 22:05:42 -0500 | [diff] [blame] | 3643 | i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec, | 
| Chris Wilson | 83d6079 | 2009-06-06 09:45:57 +0100 | [diff] [blame] | 3644 | uint64_t exec_offset) | 
|  | 3645 | { | 
|  | 3646 | uint32_t exec_start, exec_len; | 
|  | 3647 |  | 
|  | 3648 | exec_start = (uint32_t) exec_offset + exec->batch_start_offset; | 
|  | 3649 | exec_len = (uint32_t) exec->batch_len; | 
|  | 3650 |  | 
|  | 3651 | if ((exec_start | exec_len) & 0x7) | 
|  | 3652 | return -EINVAL; | 
|  | 3653 |  | 
|  | 3654 | if (!exec_start) | 
|  | 3655 | return -EINVAL; | 
|  | 3656 |  | 
|  | 3657 | return 0; | 
|  | 3658 | } | 
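|  |  |  | 
|  |  | /* | 
|  |  | * Example: (exec_start | exec_len) & 0x7 rejects a start or length | 
|  |  | * that isn't 8-byte aligned in a single test, e.g. exec_start = | 
|  |  | * 0x1004 fails (0x1004 & 0x7 == 0x4) while 0x1008 passes. | 
|  |  | */ | 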
|  | 3659 |  | 
| Kristian Høgsberg | 6b95a20 | 2009-11-18 11:25:18 -0500 | [diff] [blame] | 3660 | static int | 
|  | 3661 | i915_gem_wait_for_pending_flip(struct drm_device *dev, | 
|  | 3662 | struct drm_gem_object **object_list, | 
|  | 3663 | int count) | 
|  | 3664 | { | 
|  | 3665 | drm_i915_private_t *dev_priv = dev->dev_private; | 
|  | 3666 | struct drm_i915_gem_object *obj_priv; | 
|  | 3667 | DEFINE_WAIT(wait); | 
|  | 3668 | int i, ret = 0; | 
|  | 3669 |  | 
|  | 3670 | for (;;) { | 
|  | 3671 | prepare_to_wait(&dev_priv->pending_flip_queue, | 
|  | 3672 | &wait, TASK_INTERRUPTIBLE); | 
|  | 3673 | for (i = 0; i < count; i++) { | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 3674 | obj_priv = to_intel_bo(object_list[i]); | 
| Kristian Høgsberg | 6b95a20 | 2009-11-18 11:25:18 -0500 | [diff] [blame] | 3675 | if (atomic_read(&obj_priv->pending_flip) > 0) | 
|  | 3676 | break; | 
|  | 3677 | } | 
|  | 3678 | if (i == count) | 
|  | 3679 | break; | 
|  | 3680 |  | 
|  | 3681 | if (!signal_pending(current)) { | 
|  | 3682 | mutex_unlock(&dev->struct_mutex); | 
|  | 3683 | schedule(); | 
|  | 3684 | mutex_lock(&dev->struct_mutex); | 
|  | 3685 | continue; | 
|  | 3686 | } | 
|  | 3687 | ret = -ERESTARTSYS; | 
|  | 3688 | break; | 
|  | 3689 | } | 
|  | 3690 | finish_wait(&dev_priv->pending_flip_queue, &wait); | 
|  | 3691 |  | 
|  | 3692 | return ret; | 
|  | 3693 | } | 
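|  |  |  | 
|  |  | /* | 
|  |  | * Note (illustrative): the loop above is an open-coded interruptible | 
|  |  | * wait.  struct_mutex is dropped around schedule() so that whoever | 
|  |  | * completes the flips and decrements pending_flip can make progress; | 
|  |  | * the wakeup on pending_flip_queue then re-checks every object. | 
|  |  | */ | 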
|  | 3694 |  | 
| Chris Wilson | 43b27f4 | 2010-07-02 08:57:15 +0100 | [diff] [blame] | 3695 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3696 | int | 
| Jesse Barnes | 76446ca | 2009-12-17 22:05:42 -0500 | [diff] [blame] | 3697 | i915_gem_do_execbuffer(struct drm_device *dev, void *data, | 
|  | 3698 | struct drm_file *file_priv, | 
|  | 3699 | struct drm_i915_gem_execbuffer2 *args, | 
|  | 3700 | struct drm_i915_gem_exec_object2 *exec_list) | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3701 | { | 
|  | 3702 | drm_i915_private_t *dev_priv = dev->dev_private; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3703 | struct drm_gem_object **object_list = NULL; | 
|  | 3704 | struct drm_gem_object *batch_obj; | 
| Kristian Høgsberg | b70d11d | 2009-03-03 14:45:57 -0500 | [diff] [blame] | 3705 | struct drm_i915_gem_object *obj_priv; | 
| Eric Anholt | 201361a | 2009-03-11 12:30:04 -0700 | [diff] [blame] | 3706 | struct drm_clip_rect *cliprects = NULL; | 
| Chris Wilson | 93533c2 | 2010-01-31 10:40:48 +0000 | [diff] [blame] | 3707 | struct drm_i915_gem_relocation_entry *relocs = NULL; | 
| Jesse Barnes | 76446ca | 2009-12-17 22:05:42 -0500 | [diff] [blame] | 3708 | int ret = 0, ret2, i, pinned = 0; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3709 | uint64_t exec_offset; | 
| Eric Anholt | 40a5f0d | 2009-03-12 11:23:52 -0700 | [diff] [blame] | 3710 | uint32_t seqno, flush_domains, reloc_index; | 
| Kristian Høgsberg | 6b95a20 | 2009-11-18 11:25:18 -0500 | [diff] [blame] | 3711 | int pin_tries, flips; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3712 |  | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 3713 | struct intel_ring_buffer *ring = NULL; | 
|  | 3714 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3715 | #if WATCH_EXEC | 
|  | 3716 | DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", | 
|  | 3717 | (int) args->buffers_ptr, args->buffer_count, args->batch_len); | 
|  | 3718 | #endif | 
| Zou Nan hai | d1b851f | 2010-05-21 09:08:57 +0800 | [diff] [blame] | 3719 | if (args->flags & I915_EXEC_BSD) { | 
|  | 3720 | if (!HAS_BSD(dev)) { | 
|  | 3721 | DRM_ERROR("execbuf with wrong flag\n"); | 
|  | 3722 | return -EINVAL; | 
|  | 3723 | } | 
|  | 3724 | ring = &dev_priv->bsd_ring; | 
|  | 3725 | } else { | 
|  | 3726 | ring = &dev_priv->render_ring; | 
|  | 3727 | } | 
|  | 3728 |  | 
| Eric Anholt | 4f481ed | 2008-09-10 14:22:49 -0700 | [diff] [blame] | 3729 | if (args->buffer_count < 1) { | 
|  | 3730 | DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); | 
|  | 3731 | return -EINVAL; | 
|  | 3732 | } | 
| Eric Anholt | c8e0f93 | 2009-11-22 03:49:37 +0100 | [diff] [blame] | 3733 | object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count); | 
| Jesse Barnes | 76446ca | 2009-12-17 22:05:42 -0500 | [diff] [blame] | 3734 | if (object_list == NULL) { | 
|  | 3735 | DRM_ERROR("Failed to allocate object list for %d buffers\n", | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3736 | args->buffer_count); | 
|  | 3737 | ret = -ENOMEM; | 
|  | 3738 | goto pre_mutex_err; | 
|  | 3739 | } | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 3740 |  | 
| Eric Anholt | 201361a | 2009-03-11 12:30:04 -0700 | [diff] [blame] | 3741 | if (args->num_cliprects != 0) { | 
| Eric Anholt | 9a298b2 | 2009-03-24 12:23:04 -0700 | [diff] [blame] | 3742 | cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects), | 
|  | 3743 | GFP_KERNEL); | 
| Owain Ainsworth | a40e8d3 | 2010-02-09 14:25:55 +0000 | [diff] [blame] | 3744 | if (cliprects == NULL) { | 
|  | 3745 | ret = -ENOMEM; | 
| Eric Anholt | 201361a | 2009-03-11 12:30:04 -0700 | [diff] [blame] | 3746 | goto pre_mutex_err; | 
| Owain Ainsworth | a40e8d3 | 2010-02-09 14:25:55 +0000 | [diff] [blame] | 3747 | } | 
| Eric Anholt | 201361a | 2009-03-11 12:30:04 -0700 | [diff] [blame] | 3748 |  | 
|  | 3749 | ret = copy_from_user(cliprects, | 
|  | 3750 | (struct drm_clip_rect __user *) | 
|  | 3751 | (uintptr_t) args->cliprects_ptr, | 
|  | 3752 | sizeof(*cliprects) * args->num_cliprects); | 
|  | 3753 | if (ret != 0) { | 
|  | 3754 | DRM_ERROR("copy %d cliprects failed: %d\n", | 
|  | 3755 | args->num_cliprects, ret); | 
|  | 3756 | goto pre_mutex_err; | 
|  | 3757 | } | 
	}

	ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
					    &relocs);
	if (ret != 0)
		goto pre_mutex_err;

	mutex_lock(&dev->struct_mutex);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	if (atomic_read(&dev_priv->mm.wedged)) {
		mutex_unlock(&dev->struct_mutex);
		ret = -EIO;
		goto pre_mutex_err;
	}

	if (dev_priv->mm.suspended) {
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	flips = 0;
	for (i = 0; i < args->buffer_count; i++) {
		object_list[i] = drm_gem_object_lookup(dev, file_priv,
						       exec_list[i].handle);
		if (object_list[i] == NULL) {
			DRM_ERROR("Invalid object handle %d at index %d\n",
				  exec_list[i].handle, i);
			/* prevent error path from reading uninitialized data */
			args->buffer_count = i + 1;
			ret = -EBADF;
			goto err;
		}

		obj_priv = to_intel_bo(object_list[i]);
		if (obj_priv->in_execbuffer) {
			DRM_ERROR("Object %p appears more than once in object list\n",
				  object_list[i]);
			/* prevent error path from reading uninitialized data */
			args->buffer_count = i + 1;
			ret = -EBADF;
			goto err;
		}
		obj_priv->in_execbuffer = true;
		flips += atomic_read(&obj_priv->pending_flip);
	}

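	/* If any of these objects is the target of a pending page flip,
	 * wait for the outstanding flips to complete before letting the
	 * GPU touch the objects through this execbuffer.
	 */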
	if (flips > 0) {
		ret = i915_gem_wait_for_pending_flip(dev, object_list,
						     args->buffer_count);
		if (ret)
			goto err;
	}

	/* Pin and relocate */
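	/* Two-pass strategy: try to pin and relocate every object in one
	 * pass; if the GTT is full (-ENOSPC), unpin everything, evict all
	 * buffers from the aperture and retry exactly once before giving up.
	 */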
	for (pin_tries = 0; ; pin_tries++) {
		ret = 0;
		reloc_index = 0;

		for (i = 0; i < args->buffer_count; i++) {
			object_list[i]->pending_read_domains = 0;
			object_list[i]->pending_write_domain = 0;
			ret = i915_gem_object_pin_and_relocate(object_list[i],
							       file_priv,
							       &exec_list[i],
							       &relocs[reloc_index]);
			if (ret)
				break;
			pinned = i + 1;
			reloc_index += exec_list[i].relocation_count;
		}
		/* success */
		if (ret == 0)
			break;

		/* error other than GTT full, or we've already tried again */
		if (ret != -ENOSPC || pin_tries >= 1) {
			if (ret != -ERESTARTSYS) {
				unsigned long long total_size = 0;
				int num_fences = 0;
				for (i = 0; i < args->buffer_count; i++) {
					obj_priv = to_intel_bo(object_list[i]);

					total_size += object_list[i]->size;
					num_fences +=
						exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE &&
						obj_priv->tiling_mode != I915_TILING_NONE;
				}
				DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n",
					  pinned+1, args->buffer_count,
					  total_size, num_fences,
					  ret);
				DRM_ERROR("%d objects [%d pinned], "
					  "%d object bytes [%d pinned], "
					  "%d/%d gtt bytes\n",
					  atomic_read(&dev->object_count),
					  atomic_read(&dev->pin_count),
					  atomic_read(&dev->object_memory),
					  atomic_read(&dev->pin_memory),
					  atomic_read(&dev->gtt_memory),
					  dev->gtt_total);
			}
			goto err;
		}

		/* unpin all of our buffers */
		for (i = 0; i < pinned; i++)
			i915_gem_object_unpin(object_list[i]);
		pinned = 0;

		/* evict everyone we can from the aperture */
		ret = i915_gem_evict_everything(dev);
		if (ret && ret != -ENOSPC)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	batch_obj = object_list[args->buffer_count-1];
	if (batch_obj->pending_write_domain) {
		DRM_ERROR("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	/* Sanity check the batch buffer, prior to moving objects */
	exec_offset = exec_list[args->buffer_count - 1].offset;
	ret = i915_gem_check_execbuffer(args, exec_offset);
	if (ret != 0) {
		DRM_ERROR("execbuf with invalid offset/length\n");
		goto err;
	}

	i915_verify_inactive(dev, __FILE__, __LINE__);

	/* Zero the global flush/invalidate flags. These
	 * will be modified as new domains are computed
	 * for each object
	 */
	dev->invalidate_domains = 0;
	dev->flush_domains = 0;
	dev_priv->flush_rings = 0;

	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];

		/* Compute new gpu domains and update invalidate/flush */
		i915_gem_object_set_to_gpu_domain(obj);
	}

	i915_verify_inactive(dev, __FILE__, __LINE__);

	if (dev->invalidate_domains | dev->flush_domains) {
#if WATCH_EXEC
		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
			 __func__,
			 dev->invalidate_domains,
			 dev->flush_domains);
#endif
		i915_gem_flush(dev,
			       dev->invalidate_domains,
			       dev->flush_domains);
		if (dev_priv->flush_rings & FLUSH_RENDER_RING)
			(void)i915_add_request(dev, file_priv,
					       dev->flush_domains,
					       &dev_priv->render_ring);
		if (dev_priv->flush_rings & FLUSH_BSD_RING)
			(void)i915_add_request(dev, file_priv,
					       dev->flush_domains,
					       &dev_priv->bsd_ring);
	}

	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];
		struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
		uint32_t old_write_domain = obj->write_domain;

		obj->write_domain = obj->pending_write_domain;
		if (obj->write_domain)
			list_move_tail(&obj_priv->gpu_write_list,
				       &dev_priv->mm.gpu_write_list);
		else
			list_del_init(&obj_priv->gpu_write_list);

		trace_i915_gem_object_change_domain(obj,
						    obj->read_domains,
						    old_write_domain);
	}

	i915_verify_inactive(dev, __FILE__, __LINE__);

#if WATCH_COHERENCY
	for (i = 0; i < args->buffer_count; i++) {
		i915_gem_object_check_coherency(object_list[i],
						exec_list[i].handle);
	}
#endif

#if WATCH_EXEC
	i915_gem_dump_object(batch_obj,
			     args->batch_len,
			     __func__,
			     ~0);
#endif

	/* Exec the batchbuffer */
	ret = ring->dispatch_gem_execbuffer(dev, ring, args,
					    cliprects, exec_offset);
	if (ret) {
		DRM_ERROR("dispatch failed %d\n", ret);
		goto err;
	}

	/*
	 * Ensure that the commands in the batch buffer are
	 * finished before the interrupt fires
	 */
	flush_domains = i915_retire_commands(dev, ring);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	/*
	 * Get a seqno representing the execution of the current buffer,
	 * which we can wait on.  We would like to mitigate these interrupts,
	 * likely by only creating seqnos occasionally (so that we have
	 * *some* interrupts representing completion of buffers that we can
	 * wait on when trying to clear up gtt space).
	 */
	seqno = i915_add_request(dev, file_priv, flush_domains, ring);
	BUG_ON(seqno == 0);
	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];
		obj_priv = to_intel_bo(obj);

		i915_gem_object_move_to_active(obj, seqno, ring);
#if WATCH_LRU
		DRM_INFO("%s: move to exec list %p\n", __func__, obj);
#endif
	}
#if WATCH_LRU
	i915_dump_lru(dev, __func__);
#endif

	i915_verify_inactive(dev, __FILE__, __LINE__);

err:
	for (i = 0; i < pinned; i++)
		i915_gem_object_unpin(object_list[i]);

	for (i = 0; i < args->buffer_count; i++) {
		if (object_list[i]) {
			obj_priv = to_intel_bo(object_list[i]);
			obj_priv->in_execbuffer = false;
		}
		drm_gem_object_unreference(object_list[i]);
	}

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	/* Copy the updated relocations out regardless of current error
	 * state.  Failure to update the relocs would mean that the next
	 * time userland calls execbuf, it would do so with presumed offset
	 * state that didn't match the actual object state.
	 */
	ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
					   relocs);
	if (ret2 != 0) {
		DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);

		if (ret == 0)
			ret = ret2;
	}

	drm_free_large(object_list);
	kfree(cliprects);

	return ret;
}
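/*
 * Illustrative sketch (not part of the driver): how userspace cooperates
 * with the relocation write-back above.  Before each execbuf, userspace
 * fills drm_i915_gem_relocation_entry.presumed_offset with the offset it
 * last saw for the target buffer; when the kernel has not had to move the
 * target, the relocation pass above stays cheap.  The handle, the batch
 * offset and fill_reloc() itself are made up for the example.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include "i915_drm.h"
 *
 *	static void fill_reloc(struct drm_i915_gem_relocation_entry *reloc,
 *			       uint32_t target_handle, uint64_t last_offset)
 *	{
 *		memset(reloc, 0, sizeof(*reloc));
 *		reloc->target_handle = target_handle;
 *		reloc->offset = 4;			// location of the pointer in the batch
 *		reloc->delta = 0;
 *		reloc->presumed_offset = last_offset;	// from the previous execbuf
 *		reloc->read_domains = I915_GEM_DOMAIN_RENDER;
 *		reloc->write_domain = 0;
 *	}
 */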

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

#if WATCH_EXEC
	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     (struct drm_i915_gem_exec_object __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_ERROR("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (!IS_I965G(dev))
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;

	ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++)
			exec_list[i].offset = exec2_list[i].offset;
		/* ... and back out to userspace */
		ret = copy_to_user((struct drm_i915_gem_exec_object __user *)
				   (uintptr_t) args->buffers_ptr,
				   exec_list,
				   sizeof(*exec_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_ERROR("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

#if WATCH_EXEC
	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec2_list == NULL) {
		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     (struct drm_i915_gem_exec_object2 __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_ERROR("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = copy_to_user((struct drm_i915_gem_exec_object2 __user *)
				   (uintptr_t) args->buffers_ptr,
				   exec2_list,
				   sizeof(*exec2_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_ERROR("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec2_list);
	return ret;
}
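/*
 * Illustrative sketch (not part of the driver): a minimal userspace call
 * into the execbuffer2 ioctl.  The fd and the batch handle are assumed to
 * come from opening the DRM node and from a prior GEM create/pwrite of a
 * valid batch buffer; both are hypothetical here.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include "i915_drm.h"
 *
 *	static int submit_batch(int fd, uint32_t batch_handle, uint32_t batch_len)
 *	{
 *		struct drm_i915_gem_exec_object2 obj;
 *		struct drm_i915_gem_execbuffer2 execbuf;
 *
 *		memset(&obj, 0, sizeof(obj));
 *		obj.handle = batch_handle;	// the batch must be the last entry
 *
 *		memset(&execbuf, 0, sizeof(execbuf));
 *		execbuf.buffers_ptr = (uintptr_t)&obj;
 *		execbuf.buffer_count = 1;
 *		execbuf.batch_len = batch_len;
 *		execbuf.flags = I915_EXEC_RENDER;	// pick the render ring
 *
 *		return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 *	}
 */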

int
i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int ret;

	BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	if (obj_priv->gtt_space != NULL) {
		if (alignment == 0)
			alignment = i915_gem_get_gtt_alignment(obj);
		if (obj_priv->gtt_offset & (alignment - 1)) {
			ret = i915_gem_object_unbind(obj);
			if (ret)
				return ret;
		}
	}

	if (obj_priv->gtt_space == NULL) {
		ret = i915_gem_object_bind_to_gtt(obj, alignment);
		if (ret)
			return ret;
	}

	obj_priv->pin_count++;

	/* If the object is not active and not pending a flush,
	 * remove it from the inactive list
	 */
	if (obj_priv->pin_count == 1) {
		atomic_inc(&dev->pin_count);
		atomic_add(obj->size, &dev->pin_memory);
		if (!obj_priv->active &&
		    (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
		    !list_empty(&obj_priv->list))
			list_del_init(&obj_priv->list);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);

	return 0;
}
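/*
 * Illustrative sketch (not part of the driver): the expected pairing of
 * i915_gem_object_pin()/i915_gem_object_unpin() around a period where the
 * object's GTT offset must stay stable, mirroring what
 * i915_gem_init_pipe_control() below does for the seqno page.  Error
 * handling is elided, the 4096 alignment is just an example, and
 * use_fixed_gtt_offset() is a hypothetical consumer of the offset.
 *
 *	ret = i915_gem_object_pin(obj, 4096);
 *	if (ret)
 *		return ret;
 *	use_fixed_gtt_offset(to_intel_bo(obj)->gtt_offset);
 *	i915_gem_object_unpin(obj);
 */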

void
i915_gem_object_unpin(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	i915_verify_inactive(dev, __FILE__, __LINE__);
	obj_priv->pin_count--;
	BUG_ON(obj_priv->pin_count < 0);
	BUG_ON(obj_priv->gtt_space == NULL);

	/* If the object is no longer pinned, and is
	 * neither active nor being flushed, then stick it on
	 * the inactive list
	 */
	if (obj_priv->pin_count == 0) {
		if (!obj_priv->active &&
		    (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
			list_move_tail(&obj_priv->list,
				       &dev_priv->mm.inactive_list);
		atomic_dec(&dev->pin_count);
		atomic_sub(obj->size, &dev->pin_memory);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);
}

int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}
	obj_priv = to_intel_bo(obj);

	if (obj_priv->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to pin a purgeable buffer\n");
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	obj_priv->user_pin_count++;
	obj_priv->pin_filp = file_priv;
	if (obj_priv->user_pin_count == 1) {
		ret = i915_gem_object_pin(obj, args->alignment);
		if (ret != 0) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	/* XXX - flush the CPU caches for pinned objects
	 * as the X server doesn't manage domains yet
	 */
	i915_gem_object_flush_cpu_write_domain(obj);
	args->offset = obj_priv->gtt_offset;
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

	obj_priv = to_intel_bo(obj);
	if (obj_priv->pin_filp != file_priv) {
		DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
			  args->handle);
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}
	obj_priv->user_pin_count--;
	if (obj_priv->user_pin_count == 0) {
		obj_priv->pin_filp = NULL;
		i915_gem_object_unpin(obj);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}

int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
			  args->handle);
		return -EBADF;
	}

	mutex_lock(&dev->struct_mutex);

	/* Count all active objects as busy, even if they are currently not used
	 * by the gpu. Users of this interface expect objects to eventually
	 * become non-busy without any further actions, therefore emit any
	 * necessary flushes here.
	 */
	obj_priv = to_intel_bo(obj);
	args->busy = obj_priv->active;
	if (args->busy) {
		/* Unconditionally flush objects, even when the gpu still uses this
		 * object. Userspace calling this function indicates that it wants to
		 * use this buffer rather sooner than later, so issuing the required
		 * flush earlier is beneficial.
		 */
		if (obj->write_domain) {
			i915_gem_flush(dev, 0, obj->write_domain);
			(void)i915_add_request(dev, file_priv, obj->write_domain, obj_priv->ring);
		}

		/* Update the active list for the hardware's current position.
		 * Otherwise this only updates on a delayed timer or when irqs
		 * are actually unmasked, and our working set ends up being
		 * larger than required.
		 */
		i915_gem_retire_requests_ring(dev, obj_priv->ring);

		args->busy = obj_priv->active;
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}
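/*
 * Illustrative sketch (not part of the driver): how userspace typically
 * consumes the busy ioctl, polling until the GPU is done with a buffer
 * before reusing it.  The fd and handle are hypothetical, and a real
 * client would sleep or do other work between polls.
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include "i915_drm.h"
 *
 *	static void wait_until_idle(int fd, uint32_t handle)
 *	{
 *		struct drm_i915_gem_busy busy = { .handle = handle };
 *
 *		do {
 *			busy.busy = 0;
 *			if (ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy))
 *				break;		// e.g. bad handle: give up
 *		} while (busy.busy);
 *	}
 */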

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}

int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_gem_madvise *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
			  args->handle);
		return -EBADF;
	}

	mutex_lock(&dev->struct_mutex);
	obj_priv = to_intel_bo(obj);

	if (obj_priv->pin_count) {
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);

		DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
		return -EINVAL;
	}

	if (obj_priv->madv != __I915_MADV_PURGED)
		obj_priv->madv = args->madv;

	/* if the object is no longer bound, discard its backing storage */
	if (i915_gem_object_is_purgeable(obj_priv) &&
	    obj_priv->gtt_space == NULL)
		i915_gem_object_truncate(obj);

	args->retained = obj_priv->madv != __I915_MADV_PURGED;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
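/*
 * Illustrative sketch (not part of the driver): the madvise contract as
 * seen from a userspace buffer cache.  A cached, idle buffer is marked
 * DONTNEED; on reuse it is marked WILLNEED again and must be thrown away
 * if the kernel reports that the pages were purged in the meantime.  The
 * fd and handle are hypothetical.
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include "i915_drm.h"
 *
 *	// Returns 1 if the buffer's contents survived, 0 if purged or on error.
 *	static int mark_willneed(int fd, uint32_t handle)
 *	{
 *		struct drm_i915_gem_madvise madv = {
 *			.handle = handle,
 *			.madv = I915_MADV_WILLNEED,
 *		};
 *
 *		if (ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv))
 *			return 0;
 *		return madv.retained;
 *	}
 */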

struct drm_gem_object *i915_gem_alloc_object(struct drm_device *dev,
					     size_t size)
{
	struct drm_i915_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
		kfree(obj);
		return NULL;
	}

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	obj->agp_type = AGP_USER_MEMORY;
	obj->base.driver_private = NULL;
	obj->fence_reg = I915_FENCE_REG_NONE;
	INIT_LIST_HEAD(&obj->list);
	INIT_LIST_HEAD(&obj->gpu_write_list);
	obj->madv = I915_MADV_WILLNEED;

	trace_i915_gem_object_create(&obj->base);

	return &obj->base;
}

int i915_gem_init_object(struct drm_gem_object *obj)
{
	BUG();

	return 0;
}

static void i915_gem_free_object_tail(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int ret;

	ret = i915_gem_object_unbind(obj);
	if (ret == -ERESTARTSYS) {
		list_move(&obj_priv->list,
			  &dev_priv->mm.deferred_free_list);
		return;
	}

	if (obj_priv->mmap_offset)
		i915_gem_free_mmap_offset(obj);

	drm_gem_object_release(obj);

	kfree(obj_priv->page_cpu_valid);
	kfree(obj_priv->bit_17);
	kfree(obj_priv);
}

void i915_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	trace_i915_gem_object_destroy(obj);

	while (obj_priv->pin_count > 0)
		i915_gem_object_unpin(obj);

	if (obj_priv->phys_obj)
		i915_gem_detach_phys_object(dev, obj);

	i915_gem_free_object_tail(obj);
}

/** Unbinds all inactive objects. */
static int
i915_gem_evict_from_inactive_list(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	while (!list_empty(&dev_priv->mm.inactive_list)) {
		struct drm_gem_object *obj;
		int ret;

		obj = &list_first_entry(&dev_priv->mm.inactive_list,
					struct drm_i915_gem_object,
					list)->base;

		ret = i915_gem_object_unbind(obj);
		if (ret != 0) {
			DRM_ERROR("Error unbinding object: %d\n", ret);
			return ret;
		}
	}

	return 0;
}

int
i915_gem_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	mutex_lock(&dev->struct_mutex);

	if (dev_priv->mm.suspended ||
	    (dev_priv->render_ring.gem_object == NULL) ||
	    (HAS_BSD(dev) &&
	     dev_priv->bsd_ring.gem_object == NULL)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = i915_gpu_idle(dev);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* Under UMS, be paranoid and evict. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_gem_evict_from_inactive_list(dev);
		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
	 * We need to replace this with a semaphore, or something.
	 * And not confound mm.suspended!
	 */
	dev_priv->mm.suspended = 1;
	del_timer(&dev_priv->hangcheck_timer);

	i915_kernel_lost_context(dev);
	i915_gem_cleanup_ringbuffer(dev);

	mutex_unlock(&dev->struct_mutex);

	/* Cancel the retire work handler, which should be idle now. */
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);

	return 0;
}

/*
 * 965+ support PIPE_CONTROL commands, which provide finer grained control
 * over cache flushing.
 */
static int
i915_gem_init_pipe_control(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}
	obj_priv = to_intel_bo(obj);
	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret)
		goto err_unref;

	dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
	dev_priv->seqno_page = kmap(obj_priv->pages[0]);
	if (dev_priv->seqno_page == NULL) {
		/* without this, ret would still be 0 on the failure path */
		ret = -ENOMEM;
		goto err_unpin;
	}

	dev_priv->seqno_obj = obj;
	memset(dev_priv->seqno_page, 0, PAGE_SIZE);

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(obj);
err:
	return ret;
}

static void
i915_gem_cleanup_pipe_control(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	obj = dev_priv->seqno_obj;
	obj_priv = to_intel_bo(obj);
	kunmap(obj_priv->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(obj);
	dev_priv->seqno_obj = NULL;

	dev_priv->seqno_page = NULL;
}

int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	dev_priv->render_ring = render_ring;

	if (!I915_NEED_GFX_HWS(dev)) {
		dev_priv->render_ring.status_page.page_addr
			= dev_priv->status_page_dmah->vaddr;
		memset(dev_priv->render_ring.status_page.page_addr,
		       0, PAGE_SIZE);
	}

	if (HAS_PIPE_CONTROL(dev)) {
		ret = i915_gem_init_pipe_control(dev);
		if (ret)
			return ret;
	}

	ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
	if (ret)
		goto cleanup_pipe_control;

	if (HAS_BSD(dev)) {
		dev_priv->bsd_ring = bsd_ring;
		ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
		if (ret)
			goto cleanup_render_ring;
	}

	return 0;

cleanup_render_ring:
	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
cleanup_pipe_control:
	if (HAS_PIPE_CONTROL(dev))
		i915_gem_cleanup_pipe_control(dev);
	return ret;
}
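/*
 * Illustrative sketch (not part of the driver): the cleanup-label idiom
 * used by i915_gem_init_ringbuffer() above, reduced to its skeleton.  Each
 * setup step only has to undo the steps that already succeeded, in reverse
 * order; the step names are made up.
 *
 *	int setup_everything(void)
 *	{
 *		int ret;
 *
 *		ret = setup_a();
 *		if (ret)
 *			return ret;		// nothing to undo yet
 *		ret = setup_b();
 *		if (ret)
 *			goto cleanup_a;
 *		ret = setup_c();
 *		if (ret)
 *			goto cleanup_b;
 *		return 0;
 *
 *	cleanup_b:
 *		teardown_b();
 *	cleanup_a:
 *		teardown_a();
 *		return ret;
 *	}
 */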
|  | 4710 |  | 
|  | 4711 | void | 
|  | 4712 | i915_gem_cleanup_ringbuffer(struct drm_device *dev) | 
|  | 4713 | { | 
|  | 4714 | drm_i915_private_t *dev_priv = dev->dev_private; | 
|  | 4715 |  | 
|  | 4716 | intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); | 
| Zou Nan hai | d1b851f | 2010-05-21 09:08:57 +0800 | [diff] [blame] | 4717 | if (HAS_BSD(dev)) | 
|  | 4718 | intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); | 
| Zou Nan hai | 8187a2b | 2010-05-21 09:08:55 +0800 | [diff] [blame] | 4719 | if (HAS_PIPE_CONTROL(dev)) | 
|  | 4720 | i915_gem_cleanup_pipe_control(dev); | 
|  | 4721 | } | 
|  | 4722 |  | 
|  | 4723 | int | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4724 | i915_gem_entervt_ioctl(struct drm_device *dev, void *data, | 
|  | 4725 | struct drm_file *file_priv) | 
|  | 4726 | { | 
|  | 4727 | drm_i915_private_t *dev_priv = dev->dev_private; | 
|  | 4728 | int ret; | 
|  | 4729 |  | 
| Jesse Barnes | 79e5394 | 2008-11-07 14:24:08 -0800 | [diff] [blame] | 4730 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 
|  | 4731 | return 0; | 
|  | 4732 |  | 
| Ben Gamari | ba1234d | 2009-09-14 17:48:47 -0400 | [diff] [blame] | 4733 | if (atomic_read(&dev_priv->mm.wedged)) { | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4734 | DRM_ERROR("Reenabling wedged hardware, good luck\n"); | 
| Ben Gamari | ba1234d | 2009-09-14 17:48:47 -0400 | [diff] [blame] | 4735 | atomic_set(&dev_priv->mm.wedged, 0); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4736 | } | 
|  | 4737 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4738 | mutex_lock(&dev->struct_mutex); | 
| Eric Anholt | 9bb2d6f | 2008-12-23 18:42:32 -0800 | [diff] [blame] | 4739 | dev_priv->mm.suspended = 0; | 
|  | 4740 |  | 
|  | 4741 | ret = i915_gem_init_ringbuffer(dev); | 
| Wu Fengguang | d816f6a | 2009-04-18 10:43:32 +0800 | [diff] [blame] | 4742 | if (ret != 0) { | 
|  | 4743 | mutex_unlock(&dev->struct_mutex); | 
| Eric Anholt | 9bb2d6f | 2008-12-23 18:42:32 -0800 | [diff] [blame] | 4744 | return ret; | 
| Wu Fengguang | d816f6a | 2009-04-18 10:43:32 +0800 | [diff] [blame] | 4745 | } | 
| Eric Anholt | 9bb2d6f | 2008-12-23 18:42:32 -0800 | [diff] [blame] | 4746 |  | 
| Carl Worth | 5e118f4 | 2009-03-20 11:54:25 -0700 | [diff] [blame] | 4747 | spin_lock(&dev_priv->mm.active_list_lock); | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 4748 | BUG_ON(!list_empty(&dev_priv->render_ring.active_list)); | 
| Zou Nan hai | d1b851f | 2010-05-21 09:08:57 +0800 | [diff] [blame] | 4749 | BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list)); | 
| Carl Worth | 5e118f4 | 2009-03-20 11:54:25 -0700 | [diff] [blame] | 4750 | spin_unlock(&dev_priv->mm.active_list_lock); | 
|  | 4751 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4752 | BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); | 
|  | 4753 | BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 4754 | BUG_ON(!list_empty(&dev_priv->render_ring.request_list)); | 
| Zou Nan hai | d1b851f | 2010-05-21 09:08:57 +0800 | [diff] [blame] | 4755 | BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list)); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4756 | mutex_unlock(&dev->struct_mutex); | 
| Kristian Høgsberg | dbb19d3 | 2008-08-20 11:04:27 -0400 | [diff] [blame] | 4757 |  | 
| Chris Wilson | 5f35308 | 2010-06-07 14:03:03 +0100 | [diff] [blame] | 4758 | ret = drm_irq_install(dev); | 
|  | 4759 | if (ret) | 
|  | 4760 | goto cleanup_ringbuffer; | 
| Kristian Høgsberg | dbb19d3 | 2008-08-20 11:04:27 -0400 | [diff] [blame] | 4761 |  | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4762 | return 0; | 
| Chris Wilson | 5f35308 | 2010-06-07 14:03:03 +0100 | [diff] [blame] | 4763 |  | 
|  | 4764 | cleanup_ringbuffer: | 
|  | 4765 | mutex_lock(&dev->struct_mutex); | 
|  | 4766 | i915_gem_cleanup_ringbuffer(dev); | 
|  | 4767 | dev_priv->mm.suspended = 1; | 
|  | 4768 | mutex_unlock(&dev->struct_mutex); | 
|  | 4769 |  | 
|  | 4770 | return ret; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4771 | } | 
|  | 4772 |  | 
|  | 4773 | int | 
|  | 4774 | i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, | 
|  | 4775 | struct drm_file *file_priv) | 
|  | 4776 | { | 
| Jesse Barnes | 79e5394 | 2008-11-07 14:24:08 -0800 | [diff] [blame] | 4777 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 
|  | 4778 | return 0; | 
|  | 4779 |  | 
| Kristian Høgsberg | dbb19d3 | 2008-08-20 11:04:27 -0400 | [diff] [blame] | 4780 | drm_irq_uninstall(dev); | 
| Linus Torvalds | e6890f6 | 2009-09-08 17:09:24 -0700 | [diff] [blame] | 4781 | return i915_gem_idle(dev); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4782 | } | 
|  | 4783 |  | 
|  | 4784 | void | 
|  | 4785 | i915_gem_lastclose(struct drm_device *dev) | 
|  | 4786 | { | 
|  | 4787 | int ret; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4788 |  | 
| Eric Anholt | e806b49 | 2009-01-22 09:56:58 -0800 | [diff] [blame] | 4789 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 
|  | 4790 | return; | 
|  | 4791 |  | 
| Keith Packard | 6dbe277 | 2008-10-14 21:41:13 -0700 | [diff] [blame] | 4792 | ret = i915_gem_idle(dev); | 
|  | 4793 | if (ret) | 
|  | 4794 | DRM_ERROR("failed to idle hardware: %d\n", ret); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4795 | } | 
|  | 4796 |  | 
|  | 4797 | void | 
|  | 4798 | i915_gem_load(struct drm_device *dev) | 
|  | 4799 | { | 
| Grégoire Henry | b5aa8a0 | 2009-06-23 15:41:02 +0200 | [diff] [blame] | 4800 | int i; | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4801 | drm_i915_private_t *dev_priv = dev->dev_private; | 
|  | 4802 |  | 
| Carl Worth | 5e118f4 | 2009-03-20 11:54:25 -0700 | [diff] [blame] | 4803 | spin_lock_init(&dev_priv->mm.active_list_lock); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4804 | INIT_LIST_HEAD(&dev_priv->mm.flushing_list); | 
| Daniel Vetter | 99fcb76 | 2010-02-07 16:20:18 +0100 | [diff] [blame] | 4805 | INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4806 | INIT_LIST_HEAD(&dev_priv->mm.inactive_list); | 
| Eric Anholt | a09ba7f | 2009-08-29 12:49:51 -0700 | [diff] [blame] | 4807 | INIT_LIST_HEAD(&dev_priv->mm.fence_list); | 
| Chris Wilson | be72615 | 2010-07-23 23:18:50 +0100 | [diff] [blame] | 4808 | INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list); | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 4809 | INIT_LIST_HEAD(&dev_priv->render_ring.active_list); | 
|  | 4810 | INIT_LIST_HEAD(&dev_priv->render_ring.request_list); | 
| Zou Nan hai | d1b851f | 2010-05-21 09:08:57 +0800 | [diff] [blame] | 4811 | if (HAS_BSD(dev)) { | 
|  | 4812 | INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list); | 
|  | 4813 | INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list); | 
|  | 4814 | } | 
| Daniel Vetter | 007cc8a | 2010-04-28 11:02:31 +0200 | [diff] [blame] | 4815 | for (i = 0; i < 16; i++) | 
|  | 4816 | INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4817 | INIT_DELAYED_WORK(&dev_priv->mm.retire_work, | 
|  | 4818 | i915_gem_retire_work_handler); | 
| Chris Wilson | 3116971 | 2009-09-14 16:50:28 +0100 | [diff] [blame] | 4819 | spin_lock(&shrink_list_lock); | 
|  | 4820 | list_add(&dev_priv->mm.shrink_list, &shrink_list); | 
|  | 4821 | spin_unlock(&shrink_list_lock); | 
|  | 4822 |  | 
| Dave Airlie | 9440012 | 2010-07-20 13:15:31 +1000 | [diff] [blame] | 4823 | /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ | 
|  | 4824 | if (IS_GEN3(dev)) { | 
|  | 4825 | u32 tmp = I915_READ(MI_ARB_STATE); | 
|  | 4826 | if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) { | 
|  | 4827 | /* arb state is a masked write, so set both the bit and its write-enable bit in the mask */ | 
|  | 4828 | tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT); | 
|  | 4829 | I915_WRITE(MI_ARB_STATE, tmp); | 
|  | 4830 | } | 
|  | 4831 | } | 
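|  |  |  | 
|  |  | /* | 
|  |  | * The MI_ARB_STATE write above follows the i915 masked-register | 
|  |  | * convention: the high 16 bits of the value select which of the low | 
|  |  | * 16 bits the write actually changes.  A helper for this pattern | 
|  |  | * might look like the sketch below (hypothetical macro, not defined | 
|  |  | * in this file): | 
|  |  | * | 
|  |  | *   #define MASKED_BIT_ENABLE(bit) (((bit) << MI_ARB_MASK_SHIFT) | (bit)) | 
|  |  | *   I915_WRITE(MI_ARB_STATE, MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE)); | 
|  |  | */ | 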
|  | 4832 |  | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 4833 | /* Old X drivers will take 0-2 for front, back, depth buffers */ | 
| Eric Anholt | b397c83 | 2010-01-26 09:43:10 -0800 | [diff] [blame] | 4834 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | 
|  | 4835 | dev_priv->fence_reg_start = 3; | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 4836 |  | 
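|  |  | /* 965-class and 945/G33-class chipsets have 16 fence registers; older parts have 8 */ | 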
| Jesse Barnes | 0f973f2 | 2009-01-26 17:10:45 -0800 | [diff] [blame] | 4837 | if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | 
| Jesse Barnes | de151cf | 2008-11-12 10:03:55 -0800 | [diff] [blame] | 4838 | dev_priv->num_fence_regs = 16; | 
|  | 4839 | else | 
|  | 4840 | dev_priv->num_fence_regs = 8; | 
|  | 4841 |  | 
| Grégoire Henry | b5aa8a0 | 2009-06-23 15:41:02 +0200 | [diff] [blame] | 4842 | /* Initialize fence registers to zero */ | 
|  | 4843 | if (IS_I965G(dev)) { | 
|  | 4844 | for (i = 0; i < 16; i++) | 
|  | 4845 | I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0); | 
|  | 4846 | } else { | 
|  | 4847 | for (i = 0; i < 8; i++) | 
|  | 4848 | I915_WRITE(FENCE_REG_830_0 + (i * 4), 0); | 
|  | 4849 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | 
|  | 4850 | for (i = 0; i < 8; i++) | 
|  | 4851 | I915_WRITE(FENCE_REG_945_8 + (i * 4), 0); | 
|  | 4852 | } | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4853 | i915_gem_detect_bit_6_swizzle(dev); | 
| Kristian Høgsberg | 6b95a20 | 2009-11-18 11:25:18 -0500 | [diff] [blame] | 4854 | init_waitqueue_head(&dev_priv->pending_flip_queue); | 
| Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 4855 | } | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4856 |  | 
|  | 4857 | /* | 
|  | 4858 | * Create a physically contiguous memory object for the given slot, | 
|  | 4859 | * e.g. for cursor and overlay registers that need contiguous memory. | 
|  | 4860 | */ | 
|  | 4861 | int i915_gem_init_phys_object(struct drm_device *dev, | 
|  | 4862 | int id, int size) | 
|  | 4863 | { | 
|  | 4864 | drm_i915_private_t *dev_priv = dev->dev_private; | 
|  | 4865 | struct drm_i915_gem_phys_object *phys_obj; | 
|  | 4866 | int ret; | 
|  | 4867 |  | 
|  | 4868 | if (dev_priv->mm.phys_objs[id - 1] || !size) | 
|  | 4869 | return 0; | 
|  | 4870 |  | 
| Eric Anholt | 9a298b2 | 2009-03-24 12:23:04 -0700 | [diff] [blame] | 4871 | phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL); | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4872 | if (!phys_obj) | 
|  | 4873 | return -ENOMEM; | 
|  | 4874 |  | 
|  | 4875 | phys_obj->id = id; | 
|  | 4876 |  | 
| Zhenyu Wang | e6be8d9 | 2010-01-05 11:25:05 +0800 | [diff] [blame] | 4877 | phys_obj->handle = drm_pci_alloc(dev, size, 0); | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4878 | if (!phys_obj->handle) { | 
|  | 4879 | ret = -ENOMEM; | 
|  | 4880 | goto kfree_obj; | 
|  | 4881 | } | 
|  | 4882 | #ifdef CONFIG_X86 | 
|  | 4883 | set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE); | 
|  | 4884 | #endif | 
|  | 4885 |  | 
|  | 4886 | dev_priv->mm.phys_objs[id - 1] = phys_obj; | 
|  | 4887 |  | 
|  | 4888 | return 0; | 
|  | 4889 | kfree_obj: | 
| Eric Anholt | 9a298b2 | 2009-03-24 12:23:04 -0700 | [diff] [blame] | 4890 | kfree(phys_obj); | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4891 | return ret; | 
|  | 4892 | } | 
|  | 4893 |  | 
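|  |  | /* | 
|  |  | * Release the phys object in the given slot: detach any GEM object | 
|  |  | * still using it, restore write-back caching on its pages and free | 
|  |  | * the contiguous allocation. | 
|  |  | */ | 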
|  | 4894 | void i915_gem_free_phys_object(struct drm_device *dev, int id) | 
|  | 4895 | { | 
|  | 4896 | drm_i915_private_t *dev_priv = dev->dev_private; | 
|  | 4897 | struct drm_i915_gem_phys_object *phys_obj; | 
|  | 4898 |  | 
|  | 4899 | if (!dev_priv->mm.phys_objs[id - 1]) | 
|  | 4900 | return; | 
|  | 4901 |  | 
|  | 4902 | phys_obj = dev_priv->mm.phys_objs[id - 1]; | 
|  | 4903 | if (phys_obj->cur_obj) { | 
|  | 4904 | i915_gem_detach_phys_object(dev, phys_obj->cur_obj); | 
|  | 4905 | } | 
|  | 4906 |  | 
|  | 4907 | #ifdef CONFIG_X86 | 
|  | 4908 | set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE); | 
|  | 4909 | #endif | 
|  | 4910 | drm_pci_free(dev, phys_obj->handle); | 
|  | 4911 | kfree(phys_obj); | 
|  | 4912 | dev_priv->mm.phys_objs[id - 1] = NULL; | 
|  | 4913 | } | 
|  | 4914 |  | 
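|  |  | /* Tear down every phys object slot in the table. */ | 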
|  | 4915 | void i915_gem_free_all_phys_object(struct drm_device *dev) | 
|  | 4916 | { | 
|  | 4917 | int i; | 
|  | 4918 |  | 
| Dave Airlie | 260883c | 2009-01-22 17:58:49 +1000 | [diff] [blame] | 4919 | for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++) | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4920 | i915_gem_free_phys_object(dev, i); | 
|  | 4921 | } | 
|  | 4922 |  | 
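|  |  | /* | 
|  |  | * Undo an attachment: copy the contents of the contiguous buffer back | 
|  |  | * into the object's shmem pages, flush the caches, and break the link | 
|  |  | * between the GEM object and the phys object. | 
|  |  | */ | 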
|  | 4923 | void i915_gem_detach_phys_object(struct drm_device *dev, | 
|  | 4924 | struct drm_gem_object *obj) | 
|  | 4925 | { | 
|  | 4926 | struct drm_i915_gem_object *obj_priv; | 
|  | 4927 | int i; | 
|  | 4928 | int ret; | 
|  | 4929 | int page_count; | 
|  | 4930 |  | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 4931 | obj_priv = to_intel_bo(obj); | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4932 | if (!obj_priv->phys_obj) | 
|  | 4933 | return; | 
|  | 4934 |  | 
| Chris Wilson | 4bdadb9 | 2010-01-27 13:36:32 +0000 | [diff] [blame] | 4935 | ret = i915_gem_object_get_pages(obj, 0); | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4936 | if (ret) | 
|  | 4937 | goto out; | 
|  | 4938 |  | 
|  | 4939 | page_count = obj->size / PAGE_SIZE; | 
|  | 4940 |  | 
|  | 4941 | for (i = 0; i < page_count; i++) { | 
| Eric Anholt | 856fa19 | 2009-03-19 14:10:50 -0700 | [diff] [blame] | 4942 | char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0); | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4943 | char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); | 
|  | 4944 |  | 
|  | 4945 | memcpy(dst, src, PAGE_SIZE); | 
|  | 4946 | kunmap_atomic(dst, KM_USER0); | 
|  | 4947 | } | 
| Eric Anholt | 856fa19 | 2009-03-19 14:10:50 -0700 | [diff] [blame] | 4948 | drm_clflush_pages(obj_priv->pages, page_count); | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4949 | drm_agp_chipset_flush(dev); | 
| Chris Wilson | d78b47b | 2009-06-17 21:52:49 +0100 | [diff] [blame] | 4950 |  | 
|  | 4951 | i915_gem_object_put_pages(obj); | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4952 | out: | 
|  | 4953 | obj_priv->phys_obj->cur_obj = NULL; | 
|  | 4954 | obj_priv->phys_obj = NULL; | 
|  | 4955 | } | 
|  | 4956 |  | 
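|  |  | /* | 
|  |  | * Attach a GEM object to the phys object in the given slot, creating | 
|  |  | * the phys object on demand, then copy the object's current contents | 
|  |  | * into the contiguous buffer so the hardware sees up-to-date data. | 
|  |  | */ | 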
|  | 4957 | int | 
|  | 4958 | i915_gem_attach_phys_object(struct drm_device *dev, | 
|  | 4959 | struct drm_gem_object *obj, int id) | 
|  | 4960 | { | 
|  | 4961 | drm_i915_private_t *dev_priv = dev->dev_private; | 
|  | 4962 | struct drm_i915_gem_object *obj_priv; | 
|  | 4963 | int ret = 0; | 
|  | 4964 | int page_count; | 
|  | 4965 | int i; | 
|  | 4966 |  | 
|  | 4967 | if (id > I915_MAX_PHYS_OBJECT) | 
|  | 4968 | return -EINVAL; | 
|  | 4969 |  | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 4970 | obj_priv = to_intel_bo(obj); | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4971 |  | 
|  | 4972 | if (obj_priv->phys_obj) { | 
|  | 4973 | if (obj_priv->phys_obj->id == id) | 
|  | 4974 | return 0; | 
|  | 4975 | i915_gem_detach_phys_object(dev, obj); | 
|  | 4976 | } | 
|  | 4977 |  | 
|  | 4979 | /* create a new phys object if none exists for this id yet */ | 
|  | 4980 | if (!dev_priv->mm.phys_objs[id - 1]) { | 
|  | 4981 | ret = i915_gem_init_phys_object(dev, id, | 
|  | 4982 | obj->size); | 
|  | 4983 | if (ret) { | 
| Linus Torvalds | aeb565d | 2009-01-26 10:01:53 -0800 | [diff] [blame] | 4984 | DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size); | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4985 | goto out; | 
|  | 4986 | } | 
|  | 4987 | } | 
|  | 4988 |  | 
|  | 4989 | /* bind the GEM object and the phys object to each other */ | 
|  | 4990 | obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; | 
|  | 4991 | obj_priv->phys_obj->cur_obj = obj; | 
|  | 4992 |  | 
| Chris Wilson | 4bdadb9 | 2010-01-27 13:36:32 +0000 | [diff] [blame] | 4993 | ret = i915_gem_object_get_pages(obj, 0); | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 4994 | if (ret) { | 
|  | 4995 | DRM_ERROR("failed to get page list\n"); | 
|  | 4996 | goto out; | 
|  | 4997 | } | 
|  | 4998 |  | 
|  | 4999 | page_count = obj->size / PAGE_SIZE; | 
|  | 5000 |  | 
|  | 5001 | for (i = 0; i < page_count; i++) { | 
| Eric Anholt | 856fa19 | 2009-03-19 14:10:50 -0700 | [diff] [blame] | 5002 | char *src = kmap_atomic(obj_priv->pages[i], KM_USER0); | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 5003 | char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); | 
|  | 5004 |  | 
|  | 5005 | memcpy(dst, src, PAGE_SIZE); | 
|  | 5006 | kunmap_atomic(src, KM_USER0); | 
|  | 5007 | } | 
|  | 5008 |  | 
| Chris Wilson | d78b47b | 2009-06-17 21:52:49 +0100 | [diff] [blame] | 5009 | i915_gem_object_put_pages(obj); | 
|  | 5010 |  | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 5011 | return 0; | 
|  | 5012 | out: | 
|  | 5013 | return ret; | 
|  | 5014 | } | 
|  | 5015 |  | 
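|  |  | /* | 
|  |  | * pwrite fast path for objects backed by a phys object: copy directly | 
|  |  | * from userspace into the contiguous buffer and flush the chipset. | 
|  |  | */ | 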
|  | 5016 | static int | 
|  | 5017 | i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, | 
|  | 5018 | struct drm_i915_gem_pwrite *args, | 
|  | 5019 | struct drm_file *file_priv) | 
|  | 5020 | { | 
| Daniel Vetter | 23010e4 | 2010-03-08 13:35:02 +0100 | [diff] [blame] | 5021 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 5022 | void *obj_addr; | 
|  | 5023 | int ret; | 
|  | 5024 | char __user *user_data; | 
|  | 5025 |  | 
|  | 5026 | user_data = (char __user *) (uintptr_t) args->data_ptr; | 
|  | 5027 | obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset; | 
|  | 5028 |  | 
| Zhao Yakui | 44d98a6 | 2009-10-09 11:39:40 +0800 | [diff] [blame] | 5029 | DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size); | 
| Dave Airlie | 71acb5e | 2008-12-30 20:31:46 +1000 | [diff] [blame] | 5030 | ret = copy_from_user(obj_addr, user_data, args->size); | 
|  | 5031 | if (ret) | 
|  | 5032 | return -EFAULT; | 
|  | 5033 |  | 
|  | 5034 | drm_agp_chipset_flush(dev); | 
|  | 5035 | return 0; | 
|  | 5036 | } | 
| Eric Anholt | b962442 | 2009-06-03 07:27:35 +0000 | [diff] [blame] | 5037 |  | 
|  | 5038 | void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv) | 
|  | 5039 | { | 
|  | 5040 | struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; | 
|  | 5041 |  | 
|  | 5042 | /* Clean up our request list when the client is going away, so that | 
|  | 5043 | * later retire_requests won't dereference our soon-to-be-gone | 
|  | 5044 | * file_priv. | 
|  | 5045 | */ | 
|  | 5046 | mutex_lock(&dev->struct_mutex); | 
|  | 5047 | while (!list_empty(&i915_file_priv->mm.request_list)) | 
|  | 5048 | list_del_init(i915_file_priv->mm.request_list.next); | 
|  | 5049 | mutex_unlock(&dev->struct_mutex); | 
|  | 5050 | } | 
| Chris Wilson | 3116971 | 2009-09-14 16:50:28 +0100 | [diff] [blame] | 5051 |  | 
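|  |  | /* Report whether any ring still has active or flushing objects. */ | 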
| Chris Wilson | 3116971 | 2009-09-14 16:50:28 +0100 | [diff] [blame] | 5052 | static int | 
| Chris Wilson | 1637ef4 | 2010-04-20 17:10:35 +0100 | [diff] [blame] | 5053 | i915_gpu_is_active(struct drm_device *dev) | 
|  | 5054 | { | 
|  | 5055 | drm_i915_private_t *dev_priv = dev->dev_private; | 
|  | 5056 | int lists_empty; | 
|  | 5057 |  | 
|  | 5058 | spin_lock(&dev_priv->mm.active_list_lock); | 
|  | 5059 | lists_empty = list_empty(&dev_priv->mm.flushing_list) && | 
| Zou Nan hai | 852835f | 2010-05-21 09:08:56 +0800 | [diff] [blame] | 5060 | list_empty(&dev_priv->render_ring.active_list); | 
| Zou Nan hai | d1b851f | 2010-05-21 09:08:57 +0800 | [diff] [blame] | 5061 | if (HAS_BSD(dev)) | 
|  | 5062 | lists_empty &= list_empty(&dev_priv->bsd_ring.active_list); | 
| Chris Wilson | 1637ef4 | 2010-04-20 17:10:35 +0100 | [diff] [blame] | 5063 | spin_unlock(&dev_priv->mm.active_list_lock); | 
|  | 5064 |  | 
|  | 5065 | return !lists_empty; | 
|  | 5066 | } | 
|  | 5067 |  | 
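|  |  | /* | 
|  |  | * Memory-pressure callback, wired into the core VM through the struct | 
|  |  | * shrinker at the bottom of this file.  Under this shrinker protocol | 
|  |  | * the VM first calls with nr_to_scan == 0 to ask how many objects are | 
|  |  | * reclaimable, then with a positive count to evict that many; a | 
|  |  | * return of -1 means we could not take our locks without risking | 
|  |  | * deadlock.  Roughly, the caller's side behaves like this sketch | 
|  |  | * (illustration only, not code from this file): | 
|  |  | * | 
|  |  | *   nr = shrinker->shrink(shrinker, 0, gfp_mask); | 
|  |  | *   while (nr > 0) | 
|  |  | *           nr -= shrinker->shrink(shrinker, SHRINK_BATCH, gfp_mask); | 
|  |  | */ | 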
|  | 5068 | static int | 
| Dave Chinner | 7f8275d | 2010-07-19 14:56:17 +1000 | [diff] [blame] | 5069 | i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) | 
| Chris Wilson | 3116971 | 2009-09-14 16:50:28 +0100 | [diff] [blame] | 5070 | { | 
|  | 5071 | drm_i915_private_t *dev_priv, *next_dev; | 
|  | 5072 | struct drm_i915_gem_object *obj_priv, *next_obj; | 
|  | 5073 | int cnt = 0; | 
|  | 5074 | int would_deadlock = 1; | 
|  | 5075 |  | 
|  | 5076 | /* "fast-path" to count number of available objects */ | 
|  | 5077 | if (nr_to_scan == 0) { | 
|  | 5078 | spin_lock(&shrink_list_lock); | 
|  | 5079 | list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) { | 
|  | 5080 | struct drm_device *dev = dev_priv->dev; | 
|  | 5081 |  | 
|  | 5082 | if (mutex_trylock(&dev->struct_mutex)) { | 
|  | 5083 | list_for_each_entry(obj_priv, | 
|  | 5084 | &dev_priv->mm.inactive_list, | 
|  | 5085 | list) | 
|  | 5086 | cnt++; | 
|  | 5087 | mutex_unlock(&dev->struct_mutex); | 
|  | 5088 | } | 
|  | 5089 | } | 
|  | 5090 | spin_unlock(&shrink_list_lock); | 
|  | 5091 |  | 
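|  |  | /* scale by vfs_cache_pressure (default 100, i.e. roughly cnt) */ | 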
|  | 5092 | return (cnt / 100) * sysctl_vfs_cache_pressure; | 
|  | 5093 | } | 
|  | 5094 |  | 
|  | 5095 | spin_lock(&shrink_list_lock); | 
|  | 5096 |  | 
| Chris Wilson | 1637ef4 | 2010-04-20 17:10:35 +0100 | [diff] [blame] | 5097 | rescan: | 
| Chris Wilson | 3116971 | 2009-09-14 16:50:28 +0100 | [diff] [blame] | 5098 | /* first pass: reclaim only buffers that are already purgeable */ | 
|  | 5099 | list_for_each_entry_safe(dev_priv, next_dev, | 
|  | 5100 | &shrink_list, mm.shrink_list) { | 
|  | 5101 | struct drm_device *dev = dev_priv->dev; | 
|  | 5102 |  | 
|  | 5103 | if (!mutex_trylock(&dev->struct_mutex)) | 
|  | 5104 | continue; | 
|  | 5105 |  | 
|  | 5106 | spin_unlock(&shrink_list_lock); | 
| Chris Wilson | b09a1fe | 2010-07-23 23:18:49 +0100 | [diff] [blame] | 5107 | i915_gem_retire_requests(dev); | 
| Zou Nan hai | d1b851f | 2010-05-21 09:08:57 +0800 | [diff] [blame] | 5108 |  | 
| Chris Wilson | 3116971 | 2009-09-14 16:50:28 +0100 | [diff] [blame] | 5109 | list_for_each_entry_safe(obj_priv, next_obj, | 
|  | 5110 | &dev_priv->mm.inactive_list, | 
|  | 5111 | list) { | 
|  | 5112 | if (i915_gem_object_is_purgeable(obj_priv)) { | 
| Daniel Vetter | a8089e8 | 2010-04-09 19:05:09 +0000 | [diff] [blame] | 5113 | i915_gem_object_unbind(&obj_priv->base); | 
| Chris Wilson | 3116971 | 2009-09-14 16:50:28 +0100 | [diff] [blame] | 5114 | if (--nr_to_scan <= 0) | 
|  | 5115 | break; | 
|  | 5116 | } | 
|  | 5117 | } | 
|  | 5118 |  | 
|  | 5119 | spin_lock(&shrink_list_lock); | 
|  | 5120 | mutex_unlock(&dev->struct_mutex); | 
|  | 5121 |  | 
| Chris Wilson | 963b483 | 2009-09-20 23:03:54 +0100 | [diff] [blame] | 5122 | would_deadlock = 0; | 
|  | 5123 |  | 
| Chris Wilson | 3116971 | 2009-09-14 16:50:28 +0100 | [diff] [blame] | 5124 | if (nr_to_scan <= 0) | 
|  | 5125 | break; | 
|  | 5126 | } | 
|  | 5127 |  | 
|  | 5128 | /* second pass, evict/count anything still on the inactive list */ | 
|  | 5129 | list_for_each_entry_safe(dev_priv, next_dev, | 
|  | 5130 | &shrink_list, mm.shrink_list) { | 
|  | 5131 | struct drm_device *dev = dev_priv->dev; | 
|  | 5132 |  | 
|  | 5133 | if (!mutex_trylock(&dev->struct_mutex)) | 
|  | 5134 | continue; | 
|  | 5135 |  | 
|  | 5136 | spin_unlock(&shrink_list_lock); | 
|  | 5137 |  | 
|  | 5138 | list_for_each_entry_safe(obj_priv, next_obj, | 
|  | 5139 | &dev_priv->mm.inactive_list, | 
|  | 5140 | list) { | 
|  | 5141 | if (nr_to_scan > 0) { | 
| Daniel Vetter | a8089e8 | 2010-04-09 19:05:09 +0000 | [diff] [blame] | 5142 | i915_gem_object_unbind(&obj_priv->base); | 
| Chris Wilson | 3116971 | 2009-09-14 16:50:28 +0100 | [diff] [blame] | 5143 | nr_to_scan--; | 
|  | 5144 | } else | 
|  | 5145 | cnt++; | 
|  | 5146 | } | 
|  | 5147 |  | 
|  | 5148 | spin_lock(&shrink_list_lock); | 
|  | 5149 | mutex_unlock(&dev->struct_mutex); | 
|  | 5150 |  | 
|  | 5151 | would_deadlock = 0; | 
|  | 5152 | } | 
|  | 5153 |  | 
| Chris Wilson | 1637ef4 | 2010-04-20 17:10:35 +0100 | [diff] [blame] | 5154 | if (nr_to_scan) { | 
|  | 5155 | int active = 0; | 
|  | 5156 |  | 
|  | 5157 | /* | 
|  | 5158 | * We are desperate for pages, so as a last resort, wait | 
|  | 5159 | * for the GPU to finish and discard whatever we can. | 
|  | 5160 | * This dramatically reduces the number of OOM-killer | 
|  | 5161 | * events while running the GPU aggressively. | 
|  | 5162 | */ | 
|  | 5163 | list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) { | 
|  | 5164 | struct drm_device *dev = dev_priv->dev; | 
|  | 5165 |  | 
|  | 5166 | if (!mutex_trylock(&dev->struct_mutex)) | 
|  | 5167 | continue; | 
|  | 5168 |  | 
|  | 5169 | spin_unlock(&shrink_list_lock); | 
|  | 5170 |  | 
|  | 5171 | if (i915_gpu_is_active(dev)) { | 
|  | 5172 | i915_gpu_idle(dev); | 
|  | 5173 | active++; | 
|  | 5174 | } | 
|  | 5175 |  | 
|  | 5176 | spin_lock(&shrink_list_lock); | 
|  | 5177 | mutex_unlock(&dev->struct_mutex); | 
|  | 5178 | } | 
|  | 5179 |  | 
|  | 5180 | if (active) | 
|  | 5181 | goto rescan; | 
|  | 5182 | } | 
|  | 5183 |  | 
| Chris Wilson | 3116971 | 2009-09-14 16:50:28 +0100 | [diff] [blame] | 5184 | spin_unlock(&shrink_list_lock); | 
|  | 5185 |  | 
|  | 5186 | if (would_deadlock) | 
|  | 5187 | return -1; | 
|  | 5188 | else if (cnt > 0) | 
|  | 5189 | return (cnt / 100) * sysctl_vfs_cache_pressure; | 
|  | 5190 | else | 
|  | 5191 | return 0; | 
|  | 5192 | } | 
|  | 5193 |  | 
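|  |  | /* | 
|  |  | * Registration glue: i915_gem_shrinker_init() hands i915_gem_shrink | 
|  |  | * to the page-reclaim code via register_shrinker(); DEFAULT_SEEKS | 
|  |  | * rates our objects as no more costly to recreate than ordinary | 
|  |  | * cache pages. | 
|  |  | */ | 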
|  | 5194 | static struct shrinker shrinker = { | 
|  | 5195 | .shrink = i915_gem_shrink, | 
|  | 5196 | .seeks = DEFAULT_SEEKS, | 
|  | 5197 | }; | 
|  | 5198 |  | 
|  | 5199 | __init void | 
|  | 5200 | i915_gem_shrinker_init(void) | 
|  | 5201 | { | 
|  | 5202 | register_shrinker(&shrinker); | 
|  | 5203 | } | 
|  | 5204 |  | 
|  | 5205 | __exit void | 
|  | 5206 | i915_gem_shrinker_exit(void) | 
|  | 5207 | { | 
|  | 5208 | unregister_shrinker(&shrinker); | 
|  | 5209 | } |