/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_trace.h"

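/*
 * Check every engine's global GTT timeline for an outstanding request;
 * only if no last_request is set anywhere is the GGTT considered idle.
 */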
static bool ggtt_is_idle(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id) {
		struct intel_timeline *tl;

		tl = &ggtt->base.timeline.engine[engine->id];
		if (i915_gem_active_isset(&tl->last_request))
			return false;
	}

	return true;
}

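/*
 * Add a vma to the current eviction scan, remembering it on the caller's
 * unwind list. Pinned vmas can never be evicted; with PIN_NONFAULT we
 * also skip vmas whose object still has live userspace GTT faults.
 */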
static bool
mark_free(struct drm_mm_scan *scan,
	  struct i915_vma *vma,
	  unsigned int flags,
	  struct list_head *unwind)
{
	if (i915_vma_is_pinned(vma))
		return false;

	if (flags & PIN_NONFAULT && !list_empty(&vma->obj->userfault_link))
		return false;

	list_add(&vma->exec_list, unwind);
	return drm_mm_scan_add_block(scan, &vma->node);
}

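/*
 * Example of a typical caller, as a sketch only (the real callers are the
 * GTT insertion helpers; names and arguments here are illustrative):
 *
 *	err = drm_mm_insert_node_in_range(&vm->mm, node, size, align,
 *					  colour, start, end, mode);
 *	if (err == -ENOSPC) {
 *		err = i915_gem_evict_something(vm, size, align, colour,
 *					       start, end, flags);
 *		if (err == 0)
 *			err = drm_mm_insert_node_in_range(&vm->mm, node,
 *							  size, align,
 *							  colour, start,
 *							  end, mode);
 *	}
 */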
/**
 * i915_gem_evict_something - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @min_size: size of the desired free space
 * @alignment: alignment constraint of the desired free space
 * @cache_level: cache_level for the desired space
 * @start: start (inclusive) of the range from which to evict objects
 * @end: end (exclusive) of the range from which to evict objects
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas until a free space satisfying the
 * requirements is found. Callers must check first whether any such hole exists
 * already before calling this function.
 *
 * This function is used by the object/vma binding code.
 *
 * Since this function is only used to free up virtual address space, it only
 * ignores pinned vmas, and not objects where the backing storage itself is
 * pinned. Hence obj->pages_pin_count does not protect against eviction.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int
i915_gem_evict_something(struct i915_address_space *vm,
			 u64 min_size, u64 alignment,
			 unsigned cache_level,
			 u64 start, u64 end,
			 unsigned flags)
{
	struct drm_i915_private *dev_priv = vm->i915;
	struct drm_mm_scan scan;
	struct list_head eviction_list;
	struct list_head *phases[] = {
		&vm->inactive_list,
		&vm->active_list,
		NULL,
	}, **phase;
	struct i915_vma *vma, *next;
	struct drm_mm_node *node;
	enum drm_mm_insert_mode mode;
	int ret;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	trace_i915_gem_evict(vm, min_size, alignment, flags);

	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
	 * The oldest idle objects reside on the inactive list, which is in
	 * retirement order. The next objects to retire are those in flight,
	 * on the active list, again in retirement order.
	 *
	 * The retirement sequence is thus:
	 *   1. Inactive objects (already retired)
	 *   2. Active objects (will stall on unbinding)
	 *
	 * On each list, the oldest objects lie at the HEAD with the freshest
	 * object on the TAIL.
	 */
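	/*
	 * The placement flags hint where the eventual allocation must go,
	 * so keep the scan direction consistent with the insertion mode:
	 * e.g. PIN_MAPPABLE allocations must sit low in the mappable
	 * aperture, while PIN_HIGH prefers the top of the range.
	 */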
	mode = DRM_MM_INSERT_BEST;
	if (flags & PIN_HIGH)
		mode = DRM_MM_INSERT_HIGH;
	if (flags & PIN_MAPPABLE)
		mode = DRM_MM_INSERT_LOW;
	drm_mm_scan_init_with_range(&scan, &vm->mm,
				    min_size, alignment, cache_level,
				    start, end, mode);

	/* Retire before we search the active list. Although we have
	 * reasonable accuracy in our retirement lists, we may have
	 * a stray pin (preventing eviction) that can only be resolved by
	 * retiring.
	 */
	if (!(flags & PIN_NONBLOCK))
		i915_gem_retire_requests(dev_priv);
	else
		phases[1] = NULL;

search_again:
	INIT_LIST_HEAD(&eviction_list);
	phase = phases;
	do {
		list_for_each_entry(vma, *phase, vm_link)
			if (mark_free(&scan, vma, flags, &eviction_list))
				goto found;
	} while (*++phase);

	/* Nothing found, clean up and bail out! */
	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
		ret = drm_mm_scan_remove_block(&scan, &vma->node);
		BUG_ON(ret);
	}

	/* Can we unpin some objects such as idle hw contexts,
	 * or pending flips? But since only the GGTT has global entries
	 * such as scanouts, ringbuffers and contexts, we can skip the
	 * purge when inspecting per-process local address spaces.
	 */
	if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
		return -ENOSPC;

	if (ggtt_is_idle(dev_priv)) {
		/* If we still have pending pageflip completions, drop
		 * back to userspace to give our workqueues time to
		 * acquire our locks and unpin the old scanouts.
		 */
		return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
	}

	/* Not everything in the GGTT is tracked via vma (otherwise we
	 * could evict as required with minimal stalling) so we are forced
	 * to idle the GPU and explicitly retire outstanding requests in
	 * the hopes that we can then remove contexts and the like only
	 * bound by their active reference.
	 */
	ret = i915_gem_switch_to_kernel_context(dev_priv);
	if (ret)
		return ret;

	ret = i915_gem_wait_for_idle(dev_priv,
				     I915_WAIT_INTERRUPTIBLE |
				     I915_WAIT_LOCKED);
	if (ret)
		return ret;

	goto search_again;

found:
	/* drm_mm doesn't allow any other operations while
	 * scanning, therefore store to-be-evicted objects on a
	 * temporary list and take a reference for all before
	 * calling unbind (which may remove the active reference
	 * of any of our objects, thus corrupting the list).
	 */
	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
		if (drm_mm_scan_remove_block(&scan, &vma->node))
			__i915_vma_pin(vma);
		else
			list_del(&vma->exec_list);
	}

	/* Unbinding will emit any required flushes */
	ret = 0;
	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = i915_vma_unbind(vma);
	}

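	/*
	 * A successful scan may also require evicting the neighbours of
	 * the chosen hole to satisfy range colouring (guard pages);
	 * drm_mm_scan_color_evict() hands back those extra nodes.
	 */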
	while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
		vma = container_of(node, struct i915_vma, node);
		ret = i915_vma_unbind(vma);
	}

	return ret;
}

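/*
 * Example of a typical caller, as a sketch only (illustrative, not the
 * exact i915 code): reserving an exact range, e.g. for a softpinned
 * EXEC_OBJECT_PINNED object, falls back to evicting whatever overlaps.
 *
 *	err = drm_mm_reserve_node(&vm->mm, node);
 *	if (err == -ENOSPC) {
 *		err = i915_gem_evict_for_node(vm, node, flags);
 *		if (err == 0)
 *			err = drm_mm_reserve_node(&vm->mm, node);
 *	}
 */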
/**
 * i915_gem_evict_for_node - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @target: range (and color) to evict for
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas that overlap the target node.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_for_node(struct i915_address_space *vm,
			    struct drm_mm_node *target,
			    unsigned int flags)
{
	LIST_HEAD(eviction_list);
	struct drm_mm_node *node;
	u64 start = target->start;
	u64 end = start + target->size;
	struct i915_vma *vma, *next;
	bool check_color;
	int ret = 0;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	trace_i915_gem_evict_node(vm, target, flags);

	/* Retire before we search the active list. Although we have
	 * reasonable accuracy in our retirement lists, we may have
	 * a stray pin (preventing eviction) that can only be resolved by
	 * retiring.
	 */
	if (!(flags & PIN_NONBLOCK))
		i915_gem_retire_requests(vm->i915);

	check_color = vm->mm.color_adjust;
	if (check_color) {
		/* Expand search to cover neighbouring guard pages (or lack!) */
		if (start)
			start -= I915_GTT_PAGE_SIZE;

		/* Always look at the page afterwards to avoid the end-of-GTT */
		end += I915_GTT_PAGE_SIZE;
	}
	GEM_BUG_ON(start >= end);

	drm_mm_for_each_node_in_range(node, &vm->mm, start, end) {
		/* If we find any non-objects (!vma), we cannot evict them */
		if (node->color == I915_COLOR_UNEVICTABLE) {
			ret = -ENOSPC;
			break;
		}

		GEM_BUG_ON(!node->allocated);
		vma = container_of(node, typeof(*vma), node);

		/* If we are using coloring to insert guard pages between
		 * different cache domains within the address space, we have
		 * to check whether the objects on either side of our range
		 * abut and conflict. If they are in conflict, then we evict
		 * those as well to make room for our guard pages.
		 */
		if (check_color) {
			if (node->start + node->size == target->start) {
				if (node->color == target->color)
					continue;
			}
			if (node->start == target->start + target->size) {
				if (node->color == target->color)
					continue;
			}
		}

		if (flags & PIN_NONBLOCK &&
		    (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))) {
			ret = -ENOSPC;
			break;
		}

		/* Overlap of objects in the same batch? */
		if (i915_vma_is_pinned(vma)) {
			ret = -ENOSPC;
			if (vma->exec_entry &&
			    vma->exec_entry->flags & EXEC_OBJECT_PINNED)
				ret = -EINVAL;
			break;
		}

		/* Never show fear in the face of dragons!
		 *
		 * We cannot directly remove this node from within this
		 * iterator and as with i915_gem_evict_something() we employ
		 * the vma pin_count in order to prevent the action of
		 * unbinding one vma from freeing (by dropping its active
		 * reference) another in our eviction list.
		 */
		__i915_vma_pin(vma);
		list_add(&vma->exec_list, &eviction_list);
	}

	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = i915_vma_unbind(vma);
	}

	return ret;
}

/**
 * i915_gem_evict_vm - Evict all idle vmas from a vm
 * @vm: Address space to cleanse
 * @do_idle: Boolean directing whether to idle first.
 *
 * This function evicts all idle vmas from a vm. If all unpinned vmas should be
 * evicted, @do_idle needs to be set to true.
 *
 * This is used by the execbuf code as a last-ditch effort to defragment the
 * address space.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
{
	struct i915_vma *vma, *next;
	int ret;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	trace_i915_gem_evict_vm(vm);

	if (do_idle) {
		struct drm_i915_private *dev_priv = vm->i915;

		if (i915_is_ggtt(vm)) {
			ret = i915_gem_switch_to_kernel_context(dev_priv);
			if (ret)
				return ret;
		}

		ret = i915_gem_wait_for_idle(dev_priv,
					     I915_WAIT_INTERRUPTIBLE |
					     I915_WAIT_LOCKED);
		if (ret)
			return ret;

		WARN_ON(!list_empty(&vm->active_list));
	}

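	/*
	 * With the GPU idled (or if the caller skipped idling), sweep the
	 * inactive list and unbind everything that is not pinned.
	 */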
	list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link)
		if (!i915_vma_is_pinned(vma))
			WARN_ON(i915_vma_unbind(vma));

	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_evict.c"
#endif