/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_trace.h"

static bool ggtt_is_idle(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id) {
		struct intel_timeline *tl;

		tl = &ggtt->base.timeline.engine[engine->id];
		if (i915_gem_active_isset(&tl->last_request))
			return false;
	}

	return true;
}

static bool
mark_free(struct i915_vma *vma, unsigned int flags, struct list_head *unwind)
{
	if (i915_vma_is_pinned(vma))
		return false;

	if (WARN_ON(!list_empty(&vma->exec_list)))
		return false;

	if (flags & PIN_NONFAULT && !list_empty(&vma->obj->userfault_link))
		return false;

	list_add(&vma->exec_list, unwind);
	return drm_mm_scan_add_block(&vma->node);
}

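/*
 * Editor's sketch (not part of the driver): mark_free() is one step of the
 * drm_mm eviction-scan protocol. The caller seeds a scan with
 * drm_mm_init_scan(), feeds candidate nodes in LRU order through
 * drm_mm_scan_add_block() until drm_mm reports that evicting the blocks seen
 * so far would open a suitable hole, and must then call
 * drm_mm_scan_remove_block() on every block it added, in reverse order of
 * addition (hence the list_add() prepend above), unbinding only those the
 * scan flags as lying inside the hole. Roughly:
 *
 *	drm_mm_init_scan(&vm->mm, size, alignment, cache_level);
 *	list_for_each_entry(vma, &vm->inactive_list, vm_link)
 *		if (drm_mm_scan_add_block(&vma->node))
 *			goto found;
 *	...
 * found:
 *	if (drm_mm_scan_remove_block(&vma->node))
 *		unbind(vma);	(block lies inside the hole)
 *
 * i915_gem_evict_something() below is the full implementation of this
 * pattern; unbind(vma) stands in for the pin/unbind dance it performs with
 * i915_vma_unbind().
 */
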
/**
 * i915_gem_evict_something - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @min_size: size of the desired free space
 * @alignment: alignment constraint of the desired free space
 * @cache_level: cache_level for the desired space
 * @start: start (inclusive) of the range from which to evict objects
 * @end: end (exclusive) of the range from which to evict objects
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas until a free space satisfying the
 * requirements is found. Callers must check first whether any such hole exists
 * already before calling this function.
 *
 * This function is used by the object/vma binding code.
 *
 * Since this function is only used to free up virtual address space, it only
 * ignores pinned vmas, and not objects whose backing storage itself is
 * pinned. Hence obj->pages_pin_count does not protect against eviction.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int
i915_gem_evict_something(struct i915_address_space *vm,
			 u64 min_size, u64 alignment,
			 unsigned cache_level,
			 u64 start, u64 end,
			 unsigned flags)
{
	struct drm_i915_private *dev_priv = vm->i915;
	struct list_head eviction_list;
	struct list_head *phases[] = {
		&vm->inactive_list,
		&vm->active_list,
		NULL,
	}, **phase;
	struct i915_vma *vma, *next;
	int ret;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	trace_i915_gem_evict(vm, min_size, alignment, flags);

	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
	 * The oldest idle objects reside on the inactive list, which is in
	 * retirement order. The next objects to retire are those in flight,
	 * on the active list, again in retirement order.
	 *
	 * The retirement sequence is thus:
	 *   1. Inactive objects (already retired)
	 *   2. Active objects (will stall on unbinding)
	 *
	 * On each list, the oldest objects lie at the HEAD with the freshest
	 * object on the TAIL.
	 */
	if (start != 0 || end != vm->total) {
		drm_mm_init_scan_with_range(&vm->mm, min_size,
					    alignment, cache_level,
					    start, end);
	} else {
		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
	}

	if (flags & PIN_NONBLOCK)
		phases[1] = NULL;

search_again:
	INIT_LIST_HEAD(&eviction_list);
	phase = phases;
	do {
		list_for_each_entry(vma, *phase, vm_link)
			if (mark_free(vma, flags, &eviction_list))
				goto found;
	} while (*++phase);

	/* Nothing found, clean up and bail out! */
	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
		ret = drm_mm_scan_remove_block(&vma->node);
		BUG_ON(ret);

		INIT_LIST_HEAD(&vma->exec_list);
	}

	/* Can we unpin some objects such as idle hw contexts,
	 * or pending flips? But since only the GGTT has global entries
	 * such as scanouts, ringbuffers and contexts, we can skip the
	 * purge when inspecting per-process local address spaces.
	 */
	if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
		return -ENOSPC;

	if (ggtt_is_idle(dev_priv)) {
		/* If we still have pending pageflip completions, drop
		 * back to userspace to give our workqueues time to
		 * acquire our locks and unpin the old scanouts.
		 */
		return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
	}

	/* Not everything in the GGTT is tracked via vma (otherwise we
	 * could evict as required with minimal stalling) so we are forced
	 * to idle the GPU and explicitly retire outstanding requests in
	 * the hopes that we can then remove contexts and the like only
	 * bound by their active reference.
	 */
	ret = i915_gem_switch_to_kernel_context(dev_priv);
	if (ret)
		return ret;

	ret = i915_gem_wait_for_idle(dev_priv,
				     I915_WAIT_INTERRUPTIBLE |
				     I915_WAIT_LOCKED);
	if (ret)
		return ret;

	i915_gem_retire_requests(dev_priv);
	goto search_again;

found:
	/* drm_mm doesn't allow any other operations while
	 * scanning, therefore store to-be-evicted objects on a
	 * temporary list and take a reference for all before
	 * calling unbind (which may remove the active reference
	 * of any of our objects, thus corrupting the list).
	 */
	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
		if (drm_mm_scan_remove_block(&vma->node))
			__i915_vma_pin(vma);
		else
			list_del_init(&vma->exec_list);
	}

	/* Unbinding will emit any required flushes */
	ret = 0;
	while (!list_empty(&eviction_list)) {
		vma = list_first_entry(&eviction_list,
				       struct i915_vma,
				       exec_list);

		list_del_init(&vma->exec_list);
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = i915_vma_unbind(vma);
	}
	return ret;
}

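/*
 * Editor's sketch (assumed caller, not part of this file): the vma binding
 * code typically tries an ordinary allocation first and only falls back to
 * eviction on -ENOSPC, retrying the insertion afterwards. The drm_mm calls
 * below follow the API of this kernel generation; the retry label is
 * illustrative:
 *
 *	ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
 *						  size, alignment,
 *						  cache_level, start, end,
 *						  DRM_MM_SEARCH_DEFAULT,
 *						  DRM_MM_CREATE_DEFAULT);
 *	if (ret == -ENOSPC) {
 *		ret = i915_gem_evict_something(vm, size, alignment,
 *					       cache_level, start, end,
 *					       flags);
 *		if (ret == 0)
 *			goto search_free;	(retry the insertion)
 *	}
 *
 * A -EAGAIN return is propagated back to userspace so that the pageflip
 * workqueues get a chance to run and unpin the old scanouts.
 */
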
/**
 * i915_gem_evict_for_vma - Evict vmas to make room for binding a new one
 * @target: address space and range to evict for
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas that overlap the target node.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_for_vma(struct i915_vma *target, unsigned int flags)
{
	LIST_HEAD(eviction_list);
	struct drm_mm_node *node;
	u64 start = target->node.start;
	u64 end = start + target->node.size;
	struct i915_vma *vma, *next;
	bool check_color;
	int ret = 0;

	lockdep_assert_held(&target->vm->i915->drm.struct_mutex);
	trace_i915_gem_evict_vma(target, flags);

	check_color = target->vm->mm.color_adjust;
	if (check_color) {
		/* Expand search to cover neighbouring guard pages (or lack!) */
		if (start > target->vm->start)
			start -= 4096;
		if (end < target->vm->start + target->vm->total)
			end += 4096;
	}

	drm_mm_for_each_node_in_range(node, &target->vm->mm, start, end) {
		/* If we find any non-objects (!vma), we cannot evict them */
		if (node->color == I915_COLOR_UNEVICTABLE) {
			ret = -ENOSPC;
			break;
		}

		vma = container_of(node, typeof(*vma), node);

		/* If we are using coloring to insert guard pages between
		 * different cache domains within the address space, we have
		 * to check whether the objects on either side of our range
		 * abut and conflict. If they are in conflict, then we evict
		 * those as well to make room for our guard pages.
		 */
		if (check_color) {
			if (vma->node.start + vma->node.size == target->node.start) {
				if (vma->node.color == target->node.color)
					continue;
			}
			if (vma->node.start == target->node.start + target->node.size) {
				if (vma->node.color == target->node.color)
					continue;
			}
		}

		if (flags & PIN_NONBLOCK &&
		    (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))) {
			ret = -ENOSPC;
			break;
		}

		/* Overlap of objects in the same batch? */
		if (i915_vma_is_pinned(vma)) {
			ret = -ENOSPC;
			if (vma->exec_entry &&
			    vma->exec_entry->flags & EXEC_OBJECT_PINNED)
				ret = -EINVAL;
			break;
		}

		/* Never show fear in the face of dragons!
		 *
		 * We cannot directly remove this node from within this
		 * iterator and as with i915_gem_evict_something() we employ
		 * the vma pin_count in order to prevent the action of
		 * unbinding one vma from freeing (by dropping its active
		 * reference) another in our eviction list.
		 */
		__i915_vma_pin(vma);
		list_add(&vma->exec_list, &eviction_list);
	}

	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
		list_del_init(&vma->exec_list);
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = i915_vma_unbind(vma);
	}

	return ret;
}

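/*
 * Editor's sketch (assumed caller, not part of this file): this path serves
 * soft-pinned objects, where userspace dictates the exact GTT offset via
 * EXEC_OBJECT_PINNED. The binding code reserves the node first and, on
 * failure, evicts whatever overlaps it before trying again:
 *
 *	vma->node.start = offset;
 *	vma->node.size = size;
 *	vma->node.color = cache_level;
 *	ret = drm_mm_reserve_node(&vm->mm, &vma->node);
 *	if (ret) {
 *		ret = i915_gem_evict_for_vma(vma, flags);
 *		if (ret == 0)
 *			ret = drm_mm_reserve_node(&vm->mm, &vma->node);
 *	}
 *
 * The -EINVAL above flags an unsatisfiable request: two objects in the same
 * batch soft-pinned to overlapping addresses.
 */
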
/**
 * i915_gem_evict_vm - Evict all idle vmas from a vm
 * @vm: Address space to cleanse
 * @do_idle: Boolean directing whether to idle first.
 *
 * This function evicts all idle vmas from a vm. If all unpinned vmas should be
 * evicted, @do_idle needs to be set to true.
 *
 * This is used by the execbuf code as a last-ditch effort to defragment the
 * address space.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
{
	struct i915_vma *vma, *next;
	int ret;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	trace_i915_gem_evict_vm(vm);

	if (do_idle) {
		struct drm_i915_private *dev_priv = vm->i915;

		if (i915_is_ggtt(vm)) {
			ret = i915_gem_switch_to_kernel_context(dev_priv);
			if (ret)
				return ret;
		}

		ret = i915_gem_wait_for_idle(dev_priv,
					     I915_WAIT_INTERRUPTIBLE |
					     I915_WAIT_LOCKED);
		if (ret)
			return ret;

		i915_gem_retire_requests(dev_priv);
		WARN_ON(!list_empty(&vm->active_list));
	}

	list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link)
		if (!i915_vma_is_pinned(vma))
			WARN_ON(i915_vma_unbind(vma));

	return 0;
}
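
/*
 * Editor's sketch (assumed caller, not part of this file): per the kernel-doc
 * above, execbuf uses this as a last-ditch defragmentation step when a batch
 * cannot be fitted piecemeal, wiping the address space and then rebinding
 * everything from scratch:
 *
 *	ret = i915_gem_evict_vm(vm, true);	(idle first, then unbind all)
 *	if (ret)
 *		return ret;
 *	(now retry binding each vma of the batch)
 *
 * With do_idle=false only vmas that are already idle are unbound, which
 * avoids stalling the GPU but may leave active vmas in place.
 */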