/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_trace.h"

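/*
 * Report whether the GGTT is idle, i.e. no engine still has an
 * outstanding request on its GGTT timeline.
 */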
static bool ggtt_is_idle(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id) {
		struct intel_timeline *tl;

		tl = &ggtt->base.timeline.engine[engine->id];
		if (i915_gem_active_isset(&tl->last_request))
			return false;
	}

	return true;
}

static int ggtt_flush(struct drm_i915_private *i915)
{
	int err;

	/* Not everything in the GGTT is tracked via vma (otherwise we
	 * could evict as required with minimal stalling) so we are forced
	 * to idle the GPU and explicitly retire outstanding requests in
	 * the hopes that we can then remove contexts and the like only
	 * bound by their active reference.
	 */
	err = i915_gem_switch_to_kernel_context(i915);
	if (err)
		return err;

	err = i915_gem_wait_for_idle(i915,
				     I915_WAIT_INTERRUPTIBLE |
				     I915_WAIT_LOCKED);
	if (err)
		return err;

	return 0;
}

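/*
 * Consider a vma as a candidate for eviction: skip anything pinned (and,
 * with PIN_NONFAULT, anything whose object is currently mapped through a
 * userspace fault), remember the vma on the unwind list and feed its node
 * into the drm_mm eviction scan.
 */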
static bool
mark_free(struct drm_mm_scan *scan,
	  struct i915_vma *vma,
	  unsigned int flags,
	  struct list_head *unwind)
{
	if (i915_vma_is_pinned(vma))
		return false;

	if (flags & PIN_NONFAULT && !list_empty(&vma->obj->userfault_link))
		return false;

	list_add(&vma->evict_link, unwind);
	return drm_mm_scan_add_block(scan, &vma->node);
}

/**
 * i915_gem_evict_something - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @min_size: size of the desired free space
 * @alignment: alignment constraint of the desired free space
 * @cache_level: cache_level for the desired space
 * @start: start (inclusive) of the range from which to evict objects
 * @end: end (exclusive) of the range from which to evict objects
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas until a free space satisfying the
 * requirements is found. Callers must check first whether any such hole exists
 * already before calling this function.
 *
 * This function is used by the object/vma binding code.
 *
 * Since this function is only used to free up virtual address space it only
 * ignores pinned vmas, and not objects whose backing storage itself is
 * pinned. Hence obj->pages_pin_count does not protect against eviction.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int
i915_gem_evict_something(struct i915_address_space *vm,
			 u64 min_size, u64 alignment,
			 unsigned cache_level,
			 u64 start, u64 end,
			 unsigned flags)
{
	struct drm_i915_private *dev_priv = vm->i915;
	struct drm_mm_scan scan;
	struct list_head eviction_list;
	struct list_head *phases[] = {
		&vm->inactive_list,
		&vm->active_list,
		NULL,
	}, **phase;
	struct i915_vma *vma, *next;
	struct drm_mm_node *node;
	enum drm_mm_insert_mode mode;
	int ret;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	trace_i915_gem_evict(vm, min_size, alignment, flags);

	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
	 * The oldest idle objects reside on the inactive list, which is in
	 * retirement order. The next objects to retire are those in flight,
	 * on the active list, again in retirement order.
	 *
	 * The retirement sequence is thus:
	 *   1. Inactive objects (already retired)
	 *   2. Active objects (will stall on unbinding)
	 *
	 * On each list, the oldest objects lie at the HEAD with the freshest
	 * object on the TAIL.
	 */
	mode = DRM_MM_INSERT_BEST;
	if (flags & PIN_HIGH)
		mode = DRM_MM_INSERT_HIGH;
	if (flags & PIN_MAPPABLE)
		mode = DRM_MM_INSERT_LOW;
	drm_mm_scan_init_with_range(&scan, &vm->mm,
				    min_size, alignment, cache_level,
				    start, end, mode);

	/* Retire before we search the active list. Although we have
	 * reasonable accuracy in our retirement lists, we may have
	 * a stray pin (preventing eviction) that can only be resolved by
	 * retiring.
	 */
	if (!(flags & PIN_NONBLOCK))
		i915_gem_retire_requests(dev_priv);
	else
		phases[1] = NULL;

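	/*
	 * Scan the inactive list first and, unless we were asked not to
	 * block, the active list, feeding each unpinned vma to drm_mm until
	 * the scanner reports that it has found a large-enough hole.
	 */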
search_again:
	INIT_LIST_HEAD(&eviction_list);
	phase = phases;
	do {
		list_for_each_entry(vma, *phase, vm_link)
			if (mark_free(&scan, vma, flags, &eviction_list))
				goto found;
	} while (*++phase);

	/* Nothing found, clean up and bail out! */
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		ret = drm_mm_scan_remove_block(&scan, &vma->node);
		BUG_ON(ret);
	}

	/* Can we unpin some objects such as idle hw contexts,
	 * or pending flips? But since only the GGTT has global entries
	 * such as scanouts, ringbuffers and contexts, we can skip the
	 * purge when inspecting per-process local address spaces.
	 */
	if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
		return -ENOSPC;

	if (ggtt_is_idle(dev_priv)) {
		/* If we still have pending pageflip completions, drop
		 * back to userspace to give our workqueues time to
		 * acquire our locks and unpin the old scanouts.
		 */
		return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
	}

	ret = ggtt_flush(dev_priv);
	if (ret)
		return ret;

	goto search_again;

found:
	/* drm_mm doesn't allow any other operations while
	 * scanning, therefore store to-be-evicted objects on a
	 * temporary list and take a reference for all before
	 * calling unbind (which may remove the active reference
	 * of any of our objects, thus corrupting the list).
	 */
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		if (drm_mm_scan_remove_block(&scan, &vma->node))
			__i915_vma_pin(vma);
		else
			list_del(&vma->evict_link);
	}

	/* Unbinding will emit any required flushes */
	ret = 0;
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = i915_vma_unbind(vma);
	}

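	/*
	 * When the mm uses colouring, clearing the scanned blocks may leave
	 * neighbouring nodes that now conflict with the colour of the hole;
	 * drm_mm_scan_color_evict() hands us those stragglers to unbind too.
	 */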
	while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
		vma = container_of(node, struct i915_vma, node);
		ret = i915_vma_unbind(vma);
	}

	return ret;
}

/**
 * i915_gem_evict_for_node - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @target: range (and color) to evict for
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas that overlap the target node.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_for_node(struct i915_address_space *vm,
			    struct drm_mm_node *target,
			    unsigned int flags)
{
	LIST_HEAD(eviction_list);
	struct drm_mm_node *node;
	u64 start = target->start;
	u64 end = start + target->size;
	struct i915_vma *vma, *next;
	bool check_color;
	int ret = 0;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	trace_i915_gem_evict_node(vm, target, flags);

	/* Retire before we search the active list. Although we have
	 * reasonable accuracy in our retirement lists, we may have
	 * a stray pin (preventing eviction) that can only be resolved by
	 * retiring.
	 */
	if (!(flags & PIN_NONBLOCK))
		i915_gem_retire_requests(vm->i915);

	check_color = vm->mm.color_adjust;
	if (check_color) {
		/* Expand search to cover neighbouring guard pages (or lack!) */
		if (start)
			start -= I915_GTT_PAGE_SIZE;

		/* Always look at the page afterwards to avoid the end-of-GTT */
		end += I915_GTT_PAGE_SIZE;
	}
	GEM_BUG_ON(start >= end);

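	/*
	 * Walk every node already allocated inside the (expanded) target
	 * range, collecting each evictable vma; anything we cannot evict
	 * means the range cannot be cleared and we must abort.
	 */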
	drm_mm_for_each_node_in_range(node, &vm->mm, start, end) {
		/* If we find any non-objects (!vma), we cannot evict them */
		if (node->color == I915_COLOR_UNEVICTABLE) {
			ret = -ENOSPC;
			break;
		}

		GEM_BUG_ON(!node->allocated);
		vma = container_of(node, typeof(*vma), node);

		/* If we are using coloring to insert guard pages between
		 * different cache domains within the address space, we have
		 * to check whether the objects on either side of our range
		 * abut and conflict. If they are in conflict, then we evict
		 * those as well to make room for our guard pages.
		 */
		if (check_color) {
			if (node->start + node->size == target->start) {
				if (node->color == target->color)
					continue;
			}
			if (node->start == target->start + target->size) {
				if (node->color == target->color)
					continue;
			}
		}

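		/*
		 * With PIN_NONBLOCK we may neither wait for an active vma
		 * to retire nor contend with a pinned one, so report
		 * ENOSPC and let the caller decide.
		 */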
		if (flags & PIN_NONBLOCK &&
		    (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))) {
			ret = -ENOSPC;
			break;
		}

		/* Overlap of objects in the same batch? */
		if (i915_vma_is_pinned(vma)) {
			ret = -ENOSPC;
			if (vma->exec_entry &&
			    vma->exec_entry->flags & EXEC_OBJECT_PINNED)
				ret = -EINVAL;
			break;
		}

		/* Never show fear in the face of dragons!
		 *
		 * We cannot directly remove this node from within this
		 * iterator and as with i915_gem_evict_something() we employ
		 * the vma pin_count in order to prevent the action of
		 * unbinding one vma from freeing (by dropping its active
		 * reference) another in our eviction list.
		 */
		__i915_vma_pin(vma);
		list_add(&vma->evict_link, &eviction_list);
	}

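	/*
	 * Drop our temporary pins and unbind the candidates; record the
	 * first error but keep iterating so that every vma is unpinned.
	 */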
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = i915_vma_unbind(vma);
	}

	return ret;
}

/**
 * i915_gem_evict_vm - Evict all vmas from a vm
 * @vm: Address space to cleanse
 *
 * This function evicts all vmas from a vm.
 *
 * This is used by the execbuf code as a last-ditch effort to defragment the
 * address space.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_vm(struct i915_address_space *vm)
{
	struct list_head *phases[] = {
		&vm->inactive_list,
		&vm->active_list,
		NULL
	}, **phase;
	struct list_head eviction_list;
	struct i915_vma *vma, *next;
	int ret;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	trace_i915_gem_evict_vm(vm);

	/* Switch back to the default context in order to unpin
	 * the existing context objects. However, such objects only
	 * pin themselves inside the global GTT and performing the
	 * switch otherwise is ineffective.
	 */
	if (i915_is_ggtt(vm)) {
		ret = ggtt_flush(vm->i915);
		if (ret)
			return ret;
	}

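	/*
	 * As in i915_gem_evict_something(), pin each candidate first so that
	 * unbinding one vma cannot drop the last active reference on (and so
	 * free) another vma still on our eviction list.
	 */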
	INIT_LIST_HEAD(&eviction_list);
	phase = phases;
	do {
		list_for_each_entry(vma, *phase, vm_link) {
			if (i915_vma_is_pinned(vma))
				continue;

			__i915_vma_pin(vma);
			list_add(&vma->evict_link, &eviction_list);
		}
	} while (*++phase);

	ret = 0;
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = i915_vma_unbind(vma);
	}
	return ret;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_evict.c"
#endif