/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_trace.h"

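/* Check each engine for outstanding work; the GPU is only considered
 * idle once every engine has retired all of its requests.
 */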
static bool
gpu_is_idle(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv) {
		if (intel_engine_is_active(engine))
			return false;
	}

	return true;
}

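/* Add an unpinned vma to the drm_mm eviction scan, keeping it on the
 * caller's unwind list so that the scan can be rolled back afterwards.
 * Returns true once the scan has collected enough space for the request.
 */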
static bool
mark_free(struct i915_vma *vma, struct list_head *unwind)
{
	if (i915_vma_is_pinned(vma))
		return false;

	if (WARN_ON(!list_empty(&vma->exec_list)))
		return false;

	list_add(&vma->exec_list, unwind);
	return drm_mm_scan_add_block(&vma->node);
}

/**
 * i915_gem_evict_something - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @min_size: size of the desired free space
 * @alignment: alignment constraint of the desired free space
 * @cache_level: cache_level for the desired space
 * @start: start (inclusive) of the range from which to evict objects
 * @end: end (exclusive) of the range from which to evict objects
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas until a free space satisfying the
 * requirements is found. Callers must check first whether any such hole exists
 * already before calling this function.
 *
 * This function is used by the object/vma binding code.
 *
 * Since this function is only used to free up virtual address space, it only
 * ignores pinned vmas, and not objects where the backing storage itself is
 * pinned. Hence obj->pages_pin_count does not protect against eviction.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
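 *
 * A minimal usage sketch (the caller, its locals and retry_insert() are
 * hypothetical; struct_mutex is assumed to be held and a preceding drm_mm
 * insertion attempt to have just failed):
 *
 *	ret = i915_gem_evict_something(vm, size, alignment, cache_level,
 *				       0, vm->total, PIN_NONBLOCK);
 *	if (ret == 0)
 *		ret = retry_insert(vm);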
 */
int
i915_gem_evict_something(struct i915_address_space *vm,
			 u64 min_size, u64 alignment,
			 unsigned cache_level,
			 u64 start, u64 end,
			 unsigned flags)
{
	struct drm_i915_private *dev_priv = to_i915(vm->dev);
	struct list_head eviction_list;
	struct list_head *phases[] = {
		&vm->inactive_list,
		&vm->active_list,
		NULL,
	}, **phase;
	struct i915_vma *vma, *next;
	int ret;

	trace_i915_gem_evict(vm, min_size, alignment, flags);

	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
	 * The oldest idle objects reside on the inactive list, which is in
	 * retirement order. The next objects to retire are those in flight,
	 * on the active list, again in retirement order.
	 *
	 * The retirement sequence is thus:
	 *   1. Inactive objects (already retired)
	 *   2. Active objects (will stall on unbinding)
	 *
	 * On each list, the oldest objects lie at the HEAD with the freshest
	 * object on the TAIL.
	 */
	if (start != 0 || end != vm->total) {
		drm_mm_init_scan_with_range(&vm->mm, min_size,
					    alignment, cache_level,
					    start, end);
	} else
		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);

	if (flags & PIN_NONBLOCK)
		phases[1] = NULL;

search_again:
	INIT_LIST_HEAD(&eviction_list);
	phase = phases;
	do {
		list_for_each_entry(vma, *phase, vm_link)
			if (mark_free(vma, &eviction_list))
				goto found;
	} while (*++phase);

	/* Nothing found, clean up and bail out! */
	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
		ret = drm_mm_scan_remove_block(&vma->node);
		BUG_ON(ret);

		INIT_LIST_HEAD(&vma->exec_list);
	}

	/* Can we unpin some objects such as idle hw contexts,
	 * or pending flips? But since only the GGTT has global entries
	 * such as scanouts, ringbuffers and contexts, we can skip the
	 * purge when inspecting per-process local address spaces.
	 */
	if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
		return -ENOSPC;

	if (gpu_is_idle(dev_priv)) {
		/* If we still have pending pageflip completions, drop
		 * back to userspace to give our workqueues time to
		 * acquire our locks and unpin the old scanouts.
		 */
		return intel_has_pending_fb_unpin(vm->dev) ? -EAGAIN : -ENOSPC;
	}

	/* Not everything in the GGTT is tracked via vma (otherwise we
	 * could evict as required with minimal stalling) so we are forced
	 * to idle the GPU and explicitly retire outstanding requests in
	 * the hopes that we can then remove contexts and the like only
	 * bound by their active reference.
	 */
	ret = i915_gem_switch_to_kernel_context(dev_priv);
	if (ret)
		return ret;

	ret = i915_gem_wait_for_idle(dev_priv, true);
	if (ret)
		return ret;

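	/* The GPU is now idle, so retire the completed requests; this moves
	 * their vmas off the active lists before we rescan for free space.
	 */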
	i915_gem_retire_requests(dev_priv);
	goto search_again;

found:
	/* drm_mm doesn't allow any other operations while
	 * scanning, therefore store to-be-evicted objects on a
	 * temporary list and take a reference for all before
	 * calling unbind (which may remove the active reference
	 * of any of our objects, thus corrupting the list).
	 */
	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
		if (drm_mm_scan_remove_block(&vma->node))
			__i915_vma_pin(vma);
		else
			list_del_init(&vma->exec_list);
	}

	/* Unbinding will emit any required flushes */
	ret = 0; /* initialise so that we report only the first unbind error */
	while (!list_empty(&eviction_list)) {
		vma = list_first_entry(&eviction_list,
				       struct i915_vma,
				       exec_list);

		list_del_init(&vma->exec_list);
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = i915_vma_unbind(vma);
	}
	return ret;
}
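
/**
 * i915_gem_evict_for_vma - Evict vmas overlapping a target node
 * @target: the vma whose drm_mm node defines the range to clear
 *
 * Walk the drm_mm of the target's address space and unbind every vma whose
 * node overlaps the range claimed by @target. Overlapping vmas that are
 * pinned cannot be evicted: a pin for an unrelated use reports -EBUSY, an
 * overlapping fixed (EXEC_OBJECT_PINNED) object in the same batch reports
 * -EINVAL, and any other pinned buffer in the same batch reports -ENOSPC.
 */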
int
i915_gem_evict_for_vma(struct i915_vma *target)
{
	struct drm_mm_node *node, *next;

	list_for_each_entry_safe(node, next,
				 &target->vm->mm.head_node.node_list,
				 node_list) {
		struct i915_vma *vma;
		int ret;

		if (node->start + node->size <= target->node.start)
			continue;
		if (node->start >= target->node.start + target->node.size)
			break;

		vma = container_of(node, typeof(*vma), node);

		if (i915_vma_is_pinned(vma)) {
			if (!vma->exec_entry || i915_vma_pin_count(vma) > 1)
				/* Object is pinned for some other use */
				return -EBUSY;

			/* We need to evict a buffer in the same batch */
			if (vma->exec_entry->flags & EXEC_OBJECT_PINNED)
				/* Overlapping fixed objects in the same batch */
				return -EINVAL;

			return -ENOSPC;
		}

		ret = i915_vma_unbind(vma);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * i915_gem_evict_vm - Evict all idle vmas from a vm
 * @vm: Address space to cleanse
 * @do_idle: Boolean directing whether to idle first.
 *
 * This function evicts all idle vmas from a vm. If all unpinned vmas should be
 * evicted, @do_idle must be set to true.
 *
 * This is used by the execbuf code as a last-ditch effort to defragment the
 * address space.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
{
	struct i915_vma *vma, *next;
	int ret;

	WARN_ON(!mutex_is_locked(&vm->dev->struct_mutex));
	trace_i915_gem_evict_vm(vm);

	if (do_idle) {
		struct drm_i915_private *dev_priv = to_i915(vm->dev);

		if (i915_is_ggtt(vm)) {
			ret = i915_gem_switch_to_kernel_context(dev_priv);
			if (ret)
				return ret;
		}

		ret = i915_gem_wait_for_idle(dev_priv, true);
		if (ret)
			return ret;

		i915_gem_retire_requests(dev_priv);
		WARN_ON(!list_empty(&vm->active_list));
	}

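	/* Sweep the inactive list, unbinding every vma that is not pinned */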
	list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link)
		if (!i915_vma_is_pinned(vma))
			WARN_ON(i915_vma_unbind(vma));

	return 0;
}