/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/oom.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"

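/*
 * mutex_is_locked_by - best-effort check that @task holds @mutex
 *
 * With mutex debugging or optimistic spinning enabled the mutex owner
 * is tracked and can be compared against the given task; otherwise we
 * have no owner information and must conservatively report false.
 */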
static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
	if (!mutex_is_locked(mutex))
		return false;

#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
	return mutex->owner == task;
#else
	/* Since UP may be pre-empted, we cannot assume that we own the lock */
	return false;
#endif
}

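/*
 * i915_gem_shrinker_lock - try to acquire dev->struct_mutex for reclaim
 *
 * Returns true if the mutex is held on exit, either because we took it
 * here or because the current task already owns it (i.e. an allocation
 * under the mutex recursed into the shrinker). *unlock tells the caller
 * whether it is responsible for dropping the mutex afterwards.
 */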
static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
	if (!mutex_trylock(&dev->struct_mutex)) {
		if (!mutex_is_locked_by(&dev->struct_mutex, current))
			return false;

		*unlock = false;
	} else {
		*unlock = true;
	}

	return true;
}

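/*
 * any_vma_pinned - check whether any VMA of the object holds a pin and
 * so cannot be unbound to release the backing pages.
 */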
static bool any_vma_pinned(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, obj_link)
		if (i915_vma_is_pinned(vma))
			return true;

	return false;
}

static bool swap_available(void)
{
	return get_nr_swap_pages() > 0;
}

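/*
 * can_release_pages - check whether shrinking the object can actually
 * free physical pages: it must be swap-backed, its pages must not be
 * pinned beyond its GPU bindings, and its contents must be either
 * purgeable or swappable.
 */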
static bool can_release_pages(struct drm_i915_gem_object *obj)
{
	if (!obj->mm.pages)
		return false;

	/* Only shmemfs objects are backed by swap */
	if (!obj->base.filp)
		return false;

	/* Only report true if by unbinding the object and putting its pages
	 * we can actually make forward progress towards freeing physical
	 * pages.
	 *
	 * If the pages are pinned for any other reason than being bound
	 * to the GPU, simply unbinding from the GPU is not going to succeed
	 * in releasing our pin count on the pages themselves.
	 */
	if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
		return false;

	if (any_vma_pinned(obj))
		return false;

	/* We can only return physical pages to the system if we can either
	 * discard the contents (because the user has marked them as being
	 * purgeable) or if we can move their contents out to swap.
	 */
	return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
}

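/*
 * unsafe_drop_pages - unbind the object and drop its pages without
 * holding obj->mm.lock (hence "unsafe"); the caller must recheck
 * obj->mm.pages under the lock before trusting the result. Returns
 * true if the pages appear to have been released.
 */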
static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_unbind(obj) == 0)
		__i915_gem_object_put_pages(obj);
	return !READ_ONCE(obj->mm.pages);
}

/**
 * i915_gem_shrink - Shrink buffer object caches
 * @dev_priv: i915 device
 * @target: amount of memory to make available, in pages
 * @flags: control flags for selecting cache types
 *
 * This function is the main interface to the shrinker. It will try to release
 * up to @target pages of main memory backing storage from buffer objects.
 * Selection of the specific caches can be done with @flags. This is e.g. useful
 * when purgeable objects should be removed from caches preferentially.
 *
 * Note that it's not guaranteed that the released amount is actually available
 * as free system memory - the pages might still be in use due to other reasons
 * (like cpu mmaps) or the mm core has reused them before we could grab them.
 * Therefore code that needs to explicitly shrink buffer object caches (e.g. to
 * avoid deadlocks in memory reclaim) must fall back to i915_gem_shrink_all().
 *
 * Also note that any kind of pinning (both per-vma address space pins and
 * backing storage pins at the buffer object level) results in the shrinker code
 * having to skip the object.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long
i915_gem_shrink(struct drm_i915_private *dev_priv,
		unsigned long target, unsigned flags)
{
	const struct {
		struct list_head *list;
		unsigned int bit;
	} phases[] = {
		{ &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
		{ &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
		{ NULL, 0 },
	}, *phase;
	unsigned long count = 0;
	bool unlock;

	if (!i915_gem_shrinker_lock(&dev_priv->drm, &unlock))
		return 0;

	trace_i915_gem_shrink(dev_priv, target, flags);
	i915_gem_retire_requests(dev_priv);

	/*
	 * Unbinding of objects will require HW access; let us not wake the
	 * device just to recover a little memory. If absolutely necessary,
	 * we will force the wake during oom-notifier.
	 */
	if ((flags & I915_SHRINK_BOUND) &&
	    !intel_runtime_pm_get_if_in_use(dev_priv))
		flags &= ~I915_SHRINK_BOUND;

	/*
	 * As we may completely rewrite the (un)bound list whilst unbinding
	 * (due to retiring requests) we have to strictly process only
	 * one element of the list at a time, and recheck the list
	 * on every iteration.
	 *
	 * In particular, we must hold a reference whilst removing the
	 * object as we may end up waiting for and/or retiring the objects.
	 * This might release the final reference (held by the active list)
	 * and result in the object being freed from under us. This is
	 * similar to the precautions the eviction code must take whilst
	 * removing objects.
	 *
	 * Also note that although these lists do not hold a reference to
	 * the object we can safely grab one here: The final object
	 * unreferencing and the bound_list are both protected by the
	 * dev->struct_mutex and so we won't ever be able to observe an
	 * object on the bound_list with a reference count equal to 0.
	 */
	for (phase = phases; phase->list; phase++) {
		struct list_head still_in_list;
		struct drm_i915_gem_object *obj;

		if ((flags & phase->bit) == 0)
			continue;

		INIT_LIST_HEAD(&still_in_list);
		while (count < target &&
		       (obj = list_first_entry_or_null(phase->list,
						       typeof(*obj),
						       global_list))) {
			list_move_tail(&obj->global_list, &still_in_list);
			if (!obj->mm.pages) {
				list_del_init(&obj->global_list);
				continue;
			}

			if (flags & I915_SHRINK_PURGEABLE &&
			    obj->mm.madv != I915_MADV_DONTNEED)
				continue;

			if (flags & I915_SHRINK_VMAPS &&
			    !is_vmalloc_addr(obj->mm.mapping))
				continue;

			if (!(flags & I915_SHRINK_ACTIVE) &&
			    (i915_gem_object_is_active(obj) ||
			     obj->framebuffer_references))
				continue;

			if (!can_release_pages(obj))
				continue;

			if (unsafe_drop_pages(obj)) {
				mutex_lock(&obj->mm.lock);
				if (!obj->mm.pages) {
					__i915_gem_object_invalidate(obj);
					count += obj->base.size >> PAGE_SHIFT;
				}
				mutex_unlock(&obj->mm.lock);
			}
		}
		list_splice(&still_in_list, phase->list);
	}

	if (flags & I915_SHRINK_BOUND)
		intel_runtime_pm_put(dev_priv);

	i915_gem_retire_requests(dev_priv);
	if (unlock)
		mutex_unlock(&dev_priv->drm.struct_mutex);

	/* expedite the RCU grace period to free some request slabs */
	synchronize_rcu_expedited();

	return count;
}

/**
 * i915_gem_shrink_all - Shrink buffer object caches completely
 * @dev_priv: i915 device
 *
 * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
 * caches completely. It also first waits for and retires all outstanding
 * requests to also be able to release backing storage for active objects.
 *
 * This should only be used in code to intentionally quiesce the GPU or as a
 * last-ditch effort when memory seems to have run out.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
	unsigned long freed;

	freed = i915_gem_shrink(dev_priv, -1UL,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_ACTIVE);
	rcu_barrier(); /* wait until our RCU delayed slab frees are completed */

	return freed;
}

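/*
 * i915_gem_shrinker_count - report how many pages the core shrinker
 * could hope to reclaim (our ->count_objects callback). Bound objects
 * are only counted when idle, as active objects cannot be dropped.
 */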
static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	unsigned long count;
	bool unlock;

	if (!i915_gem_shrinker_lock(dev, &unlock))
		return 0;

	i915_gem_retire_requests(dev_priv);

	count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
		if (can_release_pages(obj))
			count += obj->base.size >> PAGE_SHIFT;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
			count += obj->base.size >> PAGE_SHIFT;
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return count;
}

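/*
 * i915_gem_shrinker_scan - reclaim pages for the core shrinker (our
 * ->scan_objects callback). Purgeable objects are sacrificed first;
 * only if that falls short of sc->nr_to_scan do we shrink the other
 * releasable objects as well.
 */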
static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	struct drm_device *dev = &dev_priv->drm;
	unsigned long freed;
	bool unlock;

	if (!i915_gem_shrinker_lock(dev, &unlock))
		return SHRINK_STOP;

	freed = i915_gem_shrink(dev_priv,
				sc->nr_to_scan,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_PURGEABLE);
	if (freed < sc->nr_to_scan)
		freed += i915_gem_shrink(dev_priv,
					 sc->nr_to_scan - freed,
					 I915_SHRINK_BOUND |
					 I915_SHRINK_UNBOUND);
	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return freed;
}

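/*
 * The OOM and vmap-purge notifiers below must not be interrupted while
 * waiting for struct_mutex: shrinker_lock_uninterruptible records the
 * previous dev_priv->mm.interruptible setting and whether we took the
 * mutex ourselves, so that the matching unlock can restore both.
 */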
struct shrinker_lock_uninterruptible {
	bool was_interruptible;
	bool unlock;
};

static bool
i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
				       struct shrinker_lock_uninterruptible *slu,
				       int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);

	do {
		if (i915_gem_wait_for_idle(dev_priv, 0) == 0 &&
		    i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock))
			break;

		schedule_timeout_killable(1);
		if (fatal_signal_pending(current))
			return false;

		if (time_after(jiffies, timeout)) {
			pr_err("Unable to lock GPU to purge memory.\n");
			return false;
		}
	} while (1);

	slu->was_interruptible = dev_priv->mm.interruptible;
	dev_priv->mm.interruptible = false;
	return true;
}

static void
i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
					 struct shrinker_lock_uninterruptible *slu)
{
	dev_priv->mm.interruptible = slu->was_interruptible;
	if (slu->unlock)
		mutex_unlock(&dev_priv->drm.struct_mutex);
}

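/*
 * i915_gem_shrinker_oom - OOM notifier callback. Forces a runtime-pm
 * wakeup, shrinks everything we can, then reports how many pages were
 * freed (via *ptr) and how many remain pinned.
 */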
static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *dev_priv =
		container_of(nb, struct drm_i915_private, mm.oom_notifier);
	struct shrinker_lock_uninterruptible slu;
	struct drm_i915_gem_object *obj;
	unsigned long unevictable, bound, unbound, freed_pages;

	if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
		return NOTIFY_DONE;

	intel_runtime_pm_get(dev_priv);
	freed_pages = i915_gem_shrink_all(dev_priv);
	intel_runtime_pm_put(dev_priv);

	/* Because we may be allocating inside our own driver, we cannot
	 * assert that there are no objects with pinned pages that are not
	 * being pointed to by hardware.
	 */
	unbound = bound = unevictable = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		if (!obj->mm.pages)
			continue;

		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			unbound += obj->base.size >> PAGE_SHIFT;
	}
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!obj->mm.pages)
			continue;

		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			bound += obj->base.size >> PAGE_SHIFT;
	}

	i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);

	if (freed_pages || unbound || bound)
		pr_info("Purging GPU memory, %lu pages freed, "
			"%lu pages still pinned.\n",
			freed_pages, unevictable);
	if (unbound || bound)
		pr_err("%lu and %lu pages still available in the "
		       "bound and unbound GPU page lists.\n",
		       bound, unbound);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}

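/*
 * i915_gem_shrinker_vmap - vmap purge notifier callback. Releases as
 * much vmap address space as we can by idling the GPU, shrinking all
 * objects (including active ones and those with vmaps), and finally
 * unbinding any inactive GGTT VMA whose cached iomap wraps vmap space.
 */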
static int
i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *dev_priv =
		container_of(nb, struct drm_i915_private, mm.vmap_notifier);
	struct shrinker_lock_uninterruptible slu;
	struct i915_vma *vma, *next;
	unsigned long freed_pages = 0;
	int ret;

	if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
		return NOTIFY_DONE;

	/* Force everything onto the inactive lists */
	ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
	if (ret)
		goto out;

	intel_runtime_pm_get(dev_priv);
	freed_pages += i915_gem_shrink(dev_priv, -1UL,
				       I915_SHRINK_BOUND |
				       I915_SHRINK_UNBOUND |
				       I915_SHRINK_ACTIVE |
				       I915_SHRINK_VMAPS);
	intel_runtime_pm_put(dev_priv);

	/* We also want to clear any cached iomaps as they wrap vmap */
	list_for_each_entry_safe(vma, next,
				 &dev_priv->ggtt.base.inactive_list, vm_link) {
		unsigned long count = vma->node.size >> PAGE_SHIFT;
		if (vma->iomap && i915_vma_unbind(vma) == 0)
			freed_pages += count;
	}

out:
	i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}

/**
 * i915_gem_shrinker_init - Initialize i915 shrinker
 * @dev_priv: i915 device
 *
 * This function registers and sets up the i915 shrinker, the OOM notifier
 * and the vmap purge notifier.
 */
void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
{
	dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
	dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
	dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
	WARN_ON(register_shrinker(&dev_priv->mm.shrinker));

	dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
	WARN_ON(register_oom_notifier(&dev_priv->mm.oom_notifier));

	dev_priv->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
}

/**
 * i915_gem_shrinker_cleanup - Clean up i915 shrinker
 * @dev_priv: i915 device
 *
 * This function unregisters the i915 shrinker, the OOM notifier and the
 * vmap purge notifier.
 */
void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv)
{
	WARN_ON(unregister_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);
}