/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/oom.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"

static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
        if (!mutex_is_locked(mutex))
                return false;

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
        return mutex->owner == task;
#else
        /* Since UP may be pre-empted, we cannot assume that we own the lock */
        return false;
#endif
}

static int num_vma_bound(struct drm_i915_gem_object *obj)
{
        struct i915_vma *vma;
        int count = 0;

        list_for_each_entry(vma, &obj->vma_list, obj_link) {
                if (drm_mm_node_allocated(&vma->node))
                        count++;
                if (vma->pin_count)
                        count++;
        }

        return count;
}

static bool swap_available(void)
{
        return get_nr_swap_pages() > 0;
}

static bool can_release_pages(struct drm_i915_gem_object *obj)
{
        /* Only shmemfs objects are backed by swap */
        if (!obj->base.filp)
                return false;

        /* Only report true if by unbinding the object and putting its pages
         * we can actually make forward progress towards freeing physical
         * pages.
         *
         * If the pages are pinned for any other reason than being bound
         * to the GPU, simply unbinding from the GPU is not going to succeed
         * in releasing our pin count on the pages themselves.
         */
        if (obj->pages_pin_count != num_vma_bound(obj))
                return false;

        /* We can only return physical pages to the system if we can either
         * discard the contents (because the user has marked them as being
         * purgeable) or if we can move their contents out to swap.
         */
        return swap_available() || obj->madv == I915_MADV_DONTNEED;
}
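
/*
 * Illustrative note (editorial, not upstream documentation): backing-storage
 * pins that do not come from a GGTT/PPGTT binding keep pages_pin_count above
 * num_vma_bound(), so can_release_pages() reports false. For example, an
 * object whose pages are currently mapped into the kernel via obj->mapping,
 * or pinned for an exported dma-buf attachment, cannot make forward progress
 * for the shrinker and is skipped.
 */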

/**
 * i915_gem_shrink - Shrink buffer object caches
 * @dev_priv: i915 device
 * @target: amount of memory to make available, in pages
 * @flags: control flags for selecting cache types
 *
 * This function is the main interface to the shrinker. It will try to release
 * up to @target pages of main memory backing storage from buffer objects.
 * Selection of the specific caches can be done with @flags. This is useful
 * e.g. when purgeable objects should be removed from caches preferentially.
 *
 * Note that it's not guaranteed that the released amount is actually available
 * as free system memory - the pages might still be in use due to other reasons
 * (like cpu mmaps) or the mm core might have reused them before we could grab
 * them. Therefore code that needs to explicitly shrink buffer object caches
 * (e.g. to avoid deadlocks in memory reclaim) must fall back to
 * i915_gem_shrink_all().
 *
 * Also note that any kind of pinning (both per-vma address space pins and
 * backing storage pins at the buffer object level) results in the shrinker
 * code having to skip the object.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long
i915_gem_shrink(struct drm_i915_private *dev_priv,
                unsigned long target, unsigned flags)
{
        const struct {
                struct list_head *list;
                unsigned int bit;
        } phases[] = {
                { &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
                { &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
                { NULL, 0 },
        }, *phase;
        unsigned long count = 0;

        trace_i915_gem_shrink(dev_priv, target, flags);
        i915_gem_retire_requests(dev_priv->dev);

        /*
         * As we may completely rewrite the (un)bound list whilst unbinding
         * (due to retiring requests) we have to strictly process only
         * one element of the list at a time, and recheck the list
         * on every iteration.
         *
         * In particular, we must hold a reference whilst removing the
         * object as we may end up waiting for and/or retiring the objects.
         * This might release the final reference (held by the active list)
         * and result in the object being freed from under us. This is
         * similar to the precautions the eviction code must take whilst
         * removing objects.
         *
         * Also note that although these lists do not hold a reference to
         * the object we can safely grab one here: The final object
         * unreferencing and the bound_list are both protected by the
         * dev->struct_mutex and so we won't ever be able to observe an
         * object on the bound_list with a reference count of 0.
         */
        for (phase = phases; phase->list; phase++) {
                struct list_head still_in_list;

                if ((flags & phase->bit) == 0)
                        continue;

                INIT_LIST_HEAD(&still_in_list);
                while (count < target && !list_empty(phase->list)) {
                        struct drm_i915_gem_object *obj;
                        struct i915_vma *vma, *v;

                        obj = list_first_entry(phase->list,
                                               typeof(*obj), global_list);
                        list_move_tail(&obj->global_list, &still_in_list);

                        if (flags & I915_SHRINK_PURGEABLE &&
                            obj->madv != I915_MADV_DONTNEED)
                                continue;

                        if (flags & I915_SHRINK_VMAPS &&
                            !is_vmalloc_addr(obj->mapping))
                                continue;

                        if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active)
                                continue;

                        if (!can_release_pages(obj))
                                continue;

                        drm_gem_object_reference(&obj->base);

                        /* For the unbound phase, this should be a no-op! */
                        list_for_each_entry_safe(vma, v,
                                                 &obj->vma_list, obj_link)
                                if (i915_vma_unbind(vma))
                                        break;

                        if (i915_gem_object_put_pages(obj) == 0)
                                count += obj->base.size >> PAGE_SHIFT;

                        drm_gem_object_unreference(&obj->base);
                }
                list_splice(&still_in_list, phase->list);
        }

        i915_gem_retire_requests(dev_priv->dev);

        return count;
}
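
/*
 * Usage sketch (illustrative only, not part of the driver): a caller that
 * already holds struct_mutex and wants to free roughly 512 pages, preferring
 * purgeable shmemfs backing storage before touching everything else, might do:
 *
 *      unsigned long freed;
 *
 *      freed = i915_gem_shrink(dev_priv, 512,
 *                              I915_SHRINK_BOUND |
 *                              I915_SHRINK_UNBOUND |
 *                              I915_SHRINK_PURGEABLE);
 *      if (freed < 512)
 *              freed += i915_gem_shrink(dev_priv, 512 - freed,
 *                                       I915_SHRINK_BOUND |
 *                                       I915_SHRINK_UNBOUND);
 *
 * This mirrors the two-pass strategy used by i915_gem_shrinker_scan() below.
 */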

/**
 * i915_gem_shrink_all - Shrink buffer object caches completely
 * @dev_priv: i915 device
 *
 * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
 * caches completely. It also first waits for and retires all outstanding
 * requests so that backing storage for active objects can be released as well.
 *
 * This should only be used by code that intentionally quiesces the GPU, or as
 * a last-ditch effort when memory seems to have run out.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
        return i915_gem_shrink(dev_priv, -1UL,
                               I915_SHRINK_BOUND |
                               I915_SHRINK_UNBOUND |
                               I915_SHRINK_ACTIVE);
}

/*
 * The shrinker can be invoked from direct reclaim while this task is already
 * allocating inside the driver and therefore already holds struct_mutex. In
 * that case mutex_trylock() fails, but if the current task is the lock owner
 * we can proceed without taking the mutex again (and must not unlock it
 * afterwards). Lock stealing is suppressed via shrinker_no_lock_stealing in
 * paths where such reentrancy would be unsafe.
 */
static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
        if (!mutex_trylock(&dev->struct_mutex)) {
                if (!mutex_is_locked_by(&dev->struct_mutex, current))
                        return false;

                if (to_i915(dev)->mm.shrinker_no_lock_stealing)
                        return false;

                *unlock = false;
        } else
                *unlock = true;

        return true;
}

static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
        struct drm_i915_private *dev_priv =
                container_of(shrinker, struct drm_i915_private, mm.shrinker);
        struct drm_device *dev = dev_priv->dev;
        struct drm_i915_gem_object *obj;
        unsigned long count;
        bool unlock;

        if (!i915_gem_shrinker_lock(dev, &unlock))
                return 0;

        count = 0;
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
                if (can_release_pages(obj))
                        count += obj->base.size >> PAGE_SHIFT;

        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
                if (!obj->active && can_release_pages(obj))
                        count += obj->base.size >> PAGE_SHIFT;
        }

        if (unlock)
                mutex_unlock(&dev->struct_mutex);

        return count;
}

static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
        struct drm_i915_private *dev_priv =
                container_of(shrinker, struct drm_i915_private, mm.shrinker);
        struct drm_device *dev = dev_priv->dev;
        unsigned long freed;
        bool unlock;

        if (!i915_gem_shrinker_lock(dev, &unlock))
                return SHRINK_STOP;

        freed = i915_gem_shrink(dev_priv,
                                sc->nr_to_scan,
                                I915_SHRINK_BOUND |
                                I915_SHRINK_UNBOUND |
                                I915_SHRINK_PURGEABLE);
        if (freed < sc->nr_to_scan)
                freed += i915_gem_shrink(dev_priv,
                                         sc->nr_to_scan - freed,
                                         I915_SHRINK_BOUND |
                                         I915_SHRINK_UNBOUND);
        if (unlock)
                mutex_unlock(&dev->struct_mutex);

        return freed;
}
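
/*
 * Informal summary (editorial): the core VM drives these two callbacks. It
 * calls ->count_objects() to estimate how many pages we could plausibly
 * release and then ->scan_objects() with sc->nr_to_scan derived from that
 * estimate and the current reclaim pressure. Returning SHRINK_STOP tells it
 * that no progress could be made this time (here: struct_mutex unavailable).
 */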

struct shrinker_lock_uninterruptible {
        bool was_interruptible;
        bool unlock;
};

static bool
i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
                                       struct shrinker_lock_uninterruptible *slu,
                                       int timeout_ms)
{
        unsigned long timeout = msecs_to_jiffies(timeout_ms) + 1;

        while (!i915_gem_shrinker_lock(dev_priv->dev, &slu->unlock)) {
                schedule_timeout_killable(1);
                if (fatal_signal_pending(current))
                        return false;
                if (--timeout == 0) {
                        pr_err("Unable to lock GPU to purge memory.\n");
                        return false;
                }
        }

        slu->was_interruptible = dev_priv->mm.interruptible;
        dev_priv->mm.interruptible = false;
        return true;
}

static void
i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
                                         struct shrinker_lock_uninterruptible *slu)
{
        dev_priv->mm.interruptible = slu->was_interruptible;
        if (slu->unlock)
                mutex_unlock(&dev_priv->dev->struct_mutex);
}
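
/*
 * These two helpers are used as a pair around a reclaim attempt, as the
 * notifiers below do:
 *
 *      struct shrinker_lock_uninterruptible slu;
 *
 *      if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
 *              return NOTIFY_DONE;
 *      ... shrink under struct_mutex with interruptible waits disabled ...
 *      i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
 */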

static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
        struct drm_i915_private *dev_priv =
                container_of(nb, struct drm_i915_private, mm.oom_notifier);
        struct shrinker_lock_uninterruptible slu;
        struct drm_i915_gem_object *obj;
        unsigned long unevictable, bound, unbound, freed_pages;

        if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
                return NOTIFY_DONE;

        freed_pages = i915_gem_shrink_all(dev_priv);

        /* Because we may be allocating inside our own driver, we cannot
         * assert that there are no objects with pinned pages that are not
         * being pointed to by hardware.
         */
        unbound = bound = unevictable = 0;
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
                if (!can_release_pages(obj))
                        unevictable += obj->base.size >> PAGE_SHIFT;
                else
                        unbound += obj->base.size >> PAGE_SHIFT;
        }
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
                if (!can_release_pages(obj))
                        unevictable += obj->base.size >> PAGE_SHIFT;
                else
                        bound += obj->base.size >> PAGE_SHIFT;
        }

        i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);

        if (freed_pages || unbound || bound)
                pr_info("Purging GPU memory, %lu pages freed, "
                        "%lu pages still pinned.\n",
                        freed_pages, unevictable);
        if (unbound || bound)
                pr_err("%lu and %lu pages still available in the "
                       "bound and unbound GPU page lists.\n",
                       bound, unbound);

        *(unsigned long *)ptr += freed_pages;
        return NOTIFY_DONE;
}
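
/*
 * Informal note (editorial): the OOM notifier chain passes a pointer to a
 * running total of freed pages in @ptr; if the chain as a whole releases
 * enough memory, the core OOM handler may refrain from killing a task. The
 * counts printed above help diagnose why memory could not be released.
 */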

static int
i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
        struct drm_i915_private *dev_priv =
                container_of(nb, struct drm_i915_private, mm.vmap_notifier);
        struct shrinker_lock_uninterruptible slu;
        unsigned long freed_pages;

        if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
                return NOTIFY_DONE;

        freed_pages = i915_gem_shrink(dev_priv, -1UL,
                                      I915_SHRINK_BOUND |
                                      I915_SHRINK_UNBOUND |
                                      I915_SHRINK_ACTIVE |
                                      I915_SHRINK_VMAPS);

        i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);

        *(unsigned long *)ptr += freed_pages;
        return NOTIFY_DONE;
}

/**
 * i915_gem_shrinker_init - Initialize i915 shrinker
 * @dev_priv: i915 device
 *
 * This function registers and sets up the i915 shrinker, the OOM notifier
 * and the vmap purge notifier.
 */
void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
{
        dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
        dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
        dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
        WARN_ON(register_shrinker(&dev_priv->mm.shrinker));

        dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
        WARN_ON(register_oom_notifier(&dev_priv->mm.oom_notifier));

        dev_priv->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
        WARN_ON(register_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
}

/**
 * i915_gem_shrinker_cleanup - Clean up i915 shrinker
 * @dev_priv: i915 device
 *
 * This function unregisters the i915 shrinker, the OOM notifier and the
 * vmap purge notifier.
 */
void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv)
{
        WARN_ON(unregister_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
        WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
        unregister_shrinker(&dev_priv->mm.shrinker);
}