/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/oom.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"

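/*
 * The shrinker can be invoked from direct reclaim while this task already
 * holds dev->struct_mutex (e.g. for an allocation made with the mutex held).
 * This helper lets i915_gem_shrinker_lock() detect that case and reuse the
 * already-held lock instead of giving up when mutex_trylock() fails.
 */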
static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
        if (!mutex_is_locked(mutex))
                return false;

#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
        return mutex->owner == task;
#else
        /* Since UP may be pre-empted, we cannot assume that we own the lock */
        return false;
#endif
}

static bool any_vma_pinned(struct drm_i915_gem_object *obj)
{
        struct i915_vma *vma;

        list_for_each_entry(vma, &obj->vma_list, obj_link)
                if (i915_vma_is_pinned(vma))
                        return true;

        return false;
}

static bool swap_available(void)
{
        return get_nr_swap_pages() > 0;
}

static bool can_release_pages(struct drm_i915_gem_object *obj)
{
        /* Only shmemfs objects are backed by swap */
        if (!obj->base.filp)
                return false;

        /* Only report true if by unbinding the object and putting its pages
         * we can actually make forward progress towards freeing physical
         * pages.
         *
         * If the pages are pinned for any other reason than being bound
         * to the GPU, simply unbinding from the GPU is not going to succeed
         * in releasing our pin count on the pages themselves.
         */
        if (obj->pages_pin_count > obj->bind_count)
                return false;

        if (any_vma_pinned(obj))
                return false;

        /* We can only return physical pages to the system if we can either
         * discard the contents (because the user has marked them as being
         * purgeable) or if we can move their contents out to swap.
         */
        return swap_available() || obj->madv == I915_MADV_DONTNEED;
}

/**
 * i915_gem_shrink - Shrink buffer object caches
 * @dev_priv: i915 device
 * @target: amount of memory to make available, in pages
 * @flags: control flags for selecting cache types
 *
 * This function is the main interface to the shrinker. It will try to release
 * up to @target pages of main memory backing storage from buffer objects.
 * Selection of the specific caches can be done with @flags. This is e.g. useful
 * when purgeable objects should be removed from caches preferentially.
 *
 * Note that it's not guaranteed that the released amount is actually available
 * as free system memory - the pages might still be in use due to other reasons
 * (like cpu mmaps) or the mm core may have reused them before we could grab
 * them. Therefore code that needs to explicitly shrink buffer object caches
 * (e.g. to avoid deadlocks in memory reclaim) must fall back to
 * i915_gem_shrink_all().
 *
 * Also note that any kind of pinning (both per-vma address space pins and
 * backing storage pins at the buffer object level) results in the shrinker
 * code having to skip the object.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long
i915_gem_shrink(struct drm_i915_private *dev_priv,
                unsigned long target, unsigned flags)
{
        const struct {
                struct list_head *list;
                unsigned int bit;
        } phases[] = {
                { &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
                { &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
                { NULL, 0 },
        }, *phase;
        unsigned long count = 0;

        trace_i915_gem_shrink(dev_priv, target, flags);
        i915_gem_retire_requests(dev_priv);

        /*
         * Unbinding of objects will require HW access; let us not wake the
         * device just to recover a little memory. If absolutely necessary,
         * we will force the wake during oom-notifier.
         */
        if ((flags & I915_SHRINK_BOUND) &&
            !intel_runtime_pm_get_if_in_use(dev_priv))
                flags &= ~I915_SHRINK_BOUND;

        /*
         * As we may completely rewrite the (un)bound list whilst unbinding
         * (due to retiring requests) we have to strictly process only
         * one element of the list at a time, and recheck the list
         * on every iteration.
         *
         * In particular, we must hold a reference whilst removing the
         * object as we may end up waiting for and/or retiring the objects.
         * This might release the final reference (held by the active list)
         * and result in the object being freed from under us. This is
         * similar to the precautions the eviction code must take whilst
         * removing objects.
         *
         * Also note that although these lists do not hold a reference to
         * the object we can safely grab one here: The final object
         * unreferencing and the bound_list are both protected by the
         * dev->struct_mutex and so we won't ever be able to observe an
         * object on the bound_list with a reference count of 0.
         */
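        /*
         * The unbound list is scanned first: those objects can drop their
         * pages without any unbinding work, so they are the cheapest to
         * reclaim.
         */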
        for (phase = phases; phase->list; phase++) {
                struct list_head still_in_list;
                struct drm_i915_gem_object *obj;

                if ((flags & phase->bit) == 0)
                        continue;

                INIT_LIST_HEAD(&still_in_list);
                while (count < target &&
                       (obj = list_first_entry_or_null(phase->list,
                                                       typeof(*obj),
                                                       global_list))) {
                        list_move_tail(&obj->global_list, &still_in_list);

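                        /*
                         * Skip objects excluded by the selection flags or
                         * whose pages cannot currently be released.
                         */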
                        if (flags & I915_SHRINK_PURGEABLE &&
                            obj->madv != I915_MADV_DONTNEED)
                                continue;

                        if (flags & I915_SHRINK_VMAPS &&
                            !is_vmalloc_addr(obj->mapping))
                                continue;

                        if ((flags & I915_SHRINK_ACTIVE) == 0 &&
                            i915_gem_object_is_active(obj))
                                continue;

                        if (!can_release_pages(obj))
                                continue;

                        i915_gem_object_get(obj);

                        /* For the unbound phase, this should be a no-op! */
                        i915_gem_object_unbind(obj);
                        if (i915_gem_object_put_pages(obj) == 0)
                                count += obj->base.size >> PAGE_SHIFT;

                        i915_gem_object_put(obj);
                }
                list_splice(&still_in_list, phase->list);
        }

        if (flags & I915_SHRINK_BOUND)
                intel_runtime_pm_put(dev_priv);

        i915_gem_retire_requests(dev_priv);
        /* expedite the RCU grace period to free some request slabs */
        synchronize_rcu_expedited();

        return count;
}

/**
 * i915_gem_shrink_all - Shrink buffer object caches completely
 * @dev_priv: i915 device
 *
 * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
 * caches completely. It also first waits for and retires all outstanding
 * requests so that backing storage for active objects can be released as well.
 *
 * This should only be used in code that intentionally quiesces the GPU or as a
 * last-ditch effort when memory seems to have run out.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
        unsigned long freed;

        freed = i915_gem_shrink(dev_priv, -1UL,
                                I915_SHRINK_BOUND |
                                I915_SHRINK_UNBOUND |
                                I915_SHRINK_ACTIVE);
        rcu_barrier(); /* wait until our RCU delayed slab frees are completed */

        return freed;
}

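/*
 * Try to take struct_mutex for the shrinker. If the trylock fails but the
 * mutex is already held by the current task (reclaim recursing from within
 * i915), carry on without taking it again; *unlock tells the caller whether
 * it is responsible for dropping the mutex afterwards.
 */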
static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
        if (!mutex_trylock(&dev->struct_mutex)) {
                if (!mutex_is_locked_by(&dev->struct_mutex, current))
                        return false;

                if (to_i915(dev)->mm.shrinker_no_lock_stealing)
                        return false;

                *unlock = false;
        } else
                *unlock = true;

        return true;
}

static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
        struct drm_i915_private *dev_priv =
                container_of(shrinker, struct drm_i915_private, mm.shrinker);
        struct drm_device *dev = &dev_priv->drm;
        struct drm_i915_gem_object *obj;
        unsigned long count;
        bool unlock;

        if (!i915_gem_shrinker_lock(dev, &unlock))
                return 0;

        i915_gem_retire_requests(dev_priv);

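        /*
         * Report the number of pages we could potentially release right now:
         * every releasable unbound object plus every bound object that is
         * both idle and releasable.
         */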
        count = 0;
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
                if (can_release_pages(obj))
                        count += obj->base.size >> PAGE_SHIFT;

        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
                if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
                        count += obj->base.size >> PAGE_SHIFT;
        }

        if (unlock)
                mutex_unlock(&dev->struct_mutex);

        return count;
}

static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
        struct drm_i915_private *dev_priv =
                container_of(shrinker, struct drm_i915_private, mm.shrinker);
        struct drm_device *dev = &dev_priv->drm;
        unsigned long freed;
        bool unlock;

        if (!i915_gem_shrinker_lock(dev, &unlock))
                return SHRINK_STOP;

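        /*
         * Prefer purgeable objects: only if discarding them does not satisfy
         * sc->nr_to_scan do we fall back to shrinking the remaining caches.
         */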
        freed = i915_gem_shrink(dev_priv,
                                sc->nr_to_scan,
                                I915_SHRINK_BOUND |
                                I915_SHRINK_UNBOUND |
                                I915_SHRINK_PURGEABLE);
        if (freed < sc->nr_to_scan)
                freed += i915_gem_shrink(dev_priv,
                                         sc->nr_to_scan - freed,
                                         I915_SHRINK_BOUND |
                                         I915_SHRINK_UNBOUND);
        if (unlock)
                mutex_unlock(&dev->struct_mutex);

        return freed;
}

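/*
 * Helpers for the oom and vmap notifiers: keep retrying to idle the GPU and
 * take struct_mutex until @timeout_ms expires or a fatal signal arrives, and
 * mark GPU waits as non-interruptible while the caller holds the lock. The
 * saved state is restored by i915_gem_shrinker_unlock_uninterruptible().
 */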
struct shrinker_lock_uninterruptible {
        bool was_interruptible;
        bool unlock;
};

static bool
i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
                                       struct shrinker_lock_uninterruptible *slu,
                                       int timeout_ms)
{
        unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);

        do {
                if (i915_gem_wait_for_idle(dev_priv, false) == 0 &&
                    i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock))
                        break;

                schedule_timeout_killable(1);
                if (fatal_signal_pending(current))
                        return false;

                if (time_after(jiffies, timeout)) {
                        pr_err("Unable to lock GPU to purge memory.\n");
                        return false;
                }
        } while (1);

        slu->was_interruptible = dev_priv->mm.interruptible;
        dev_priv->mm.interruptible = false;
        return true;
}

static void
i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
                                         struct shrinker_lock_uninterruptible *slu)
{
        dev_priv->mm.interruptible = slu->was_interruptible;
        if (slu->unlock)
                mutex_unlock(&dev_priv->drm.struct_mutex);
}

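/*
 * OOM notifier: wake the device, purge every object cache we can, and report
 * the number of pages freed back through @ptr. The bound and unbound lists
 * are walked afterwards purely to produce the diagnostics printed below.
 */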
static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
        struct drm_i915_private *dev_priv =
                container_of(nb, struct drm_i915_private, mm.oom_notifier);
        struct shrinker_lock_uninterruptible slu;
        struct drm_i915_gem_object *obj;
        unsigned long unevictable, bound, unbound, freed_pages;

        if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
                return NOTIFY_DONE;

        intel_runtime_pm_get(dev_priv);
        freed_pages = i915_gem_shrink_all(dev_priv);
        intel_runtime_pm_put(dev_priv);

        /* Because we may be allocating inside our own driver, we cannot
         * assert that there are no objects with pinned pages that are not
         * being pointed to by hardware.
         */
        unbound = bound = unevictable = 0;
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
                if (!can_release_pages(obj))
                        unevictable += obj->base.size >> PAGE_SHIFT;
                else
                        unbound += obj->base.size >> PAGE_SHIFT;
        }
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
                if (!can_release_pages(obj))
                        unevictable += obj->base.size >> PAGE_SHIFT;
                else
                        bound += obj->base.size >> PAGE_SHIFT;
        }

        i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);

        if (freed_pages || unbound || bound)
                pr_info("Purging GPU memory, %lu pages freed, "
                        "%lu pages still pinned.\n",
                        freed_pages, unevictable);
        if (unbound || bound)
                pr_err("%lu and %lu pages still available in the "
                       "bound and unbound GPU page lists.\n",
                       bound, unbound);

        *(unsigned long *)ptr += freed_pages;
        return NOTIFY_DONE;
}

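/*
 * vmap purge notifier: called when vmalloc address space runs short. After
 * idling the GPU we drop every vmap we own (I915_SHRINK_VMAPS) and also
 * unbind inactive vmas with cached iomaps, since those mappings consume
 * vmap space as well.
 */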
static int
i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
        struct drm_i915_private *dev_priv =
                container_of(nb, struct drm_i915_private, mm.vmap_notifier);
        struct shrinker_lock_uninterruptible slu;
        struct i915_vma *vma, *next;
        unsigned long freed_pages = 0;
        int ret;

        if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
                return NOTIFY_DONE;

        /* Force everything onto the inactive lists */
        ret = i915_gem_wait_for_idle(dev_priv, false);
        if (ret)
                goto out;

        intel_runtime_pm_get(dev_priv);
        freed_pages += i915_gem_shrink(dev_priv, -1UL,
                                       I915_SHRINK_BOUND |
                                       I915_SHRINK_UNBOUND |
                                       I915_SHRINK_ACTIVE |
                                       I915_SHRINK_VMAPS);
        intel_runtime_pm_put(dev_priv);

        /* We also want to clear any cached iomaps as they wrap vmap */
        list_for_each_entry_safe(vma, next,
                                 &dev_priv->ggtt.base.inactive_list, vm_link) {
                unsigned long count = vma->node.size >> PAGE_SHIFT;

                if (vma->iomap && i915_vma_unbind(vma) == 0)
                        freed_pages += count;
        }

out:
        i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);

        *(unsigned long *)ptr += freed_pages;
        return NOTIFY_DONE;
}

/**
 * i915_gem_shrinker_init - Initialize i915 shrinker
 * @dev_priv: i915 device
 *
 * This function registers and sets up the i915 shrinker, the OOM notifier and
 * the vmap purge notifier.
 */
void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
{
        dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
        dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
        dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
        WARN_ON(register_shrinker(&dev_priv->mm.shrinker));

        dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
        WARN_ON(register_oom_notifier(&dev_priv->mm.oom_notifier));

        dev_priv->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
        WARN_ON(register_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
}

/**
 * i915_gem_shrinker_cleanup - Clean up i915 shrinker
 * @dev_priv: i915 device
 *
 * This function unregisters the i915 shrinker, the OOM notifier and the vmap
 * purge notifier.
 */
void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv)
{
        WARN_ON(unregister_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
        WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
        unregister_shrinker(&dev_priv->mm.shrinker);
}