/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_trace.h"

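/*
 * With legacy ringbuffer submission, an engine holds a reference to the
 * context it last ran (engine->last_context), keeping that context pinned
 * in the GTT. Before idling the GPU for eviction we therefore switch every
 * engine over to the always-pinned kernel context, so that the user
 * contexts it displaces can retire and their vmas become evictable like
 * any other object.
 */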
static int switch_to_pinned_context(struct drm_i915_private *dev_priv)
{
        struct intel_engine_cs *engine;

        if (i915.enable_execlists)
                return 0;

        for_each_engine(engine, dev_priv) {
                struct drm_i915_gem_request *req;
                int ret;

                if (engine->last_context == NULL)
                        continue;

                if (engine->last_context == dev_priv->kernel_context)
                        continue;

                req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
                if (IS_ERR(req))
                        return PTR_ERR(req);

                ret = i915_switch_context(req);
                i915_add_request_no_flush(req);
                if (ret)
                        return ret;
        }

        return 0;
}

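/*
 * Add the vma to both the drm_mm eviction scan and our unwind list;
 * returns true as soon as the scan has seen enough blocks to form a hole
 * of the requested size.
 */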
static bool
mark_free(struct i915_vma *vma, struct list_head *unwind)
{
        if (vma->pin_count)
                return false;

        if (WARN_ON(!list_empty(&vma->exec_list)))
                return false;

        list_add(&vma->exec_list, unwind);
        return drm_mm_scan_add_block(&vma->node);
}

/**
 * i915_gem_evict_something - Evict vmas to make room for binding a new one
 * @dev: drm_device
 * @vm: address space to evict from
 * @min_size: size of the desired free space
 * @alignment: alignment constraint of the desired free space
 * @cache_level: cache_level for the desired space
 * @start: start (inclusive) of the range from which to evict objects
 * @end: end (exclusive) of the range from which to evict objects
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas until a free space satisfying the
 * requirements is found. Callers must check first whether any such hole exists
 * already before calling this function.
 *
 * This function is used by the object/vma binding code.
 *
 * Since this function is only used to free up virtual address space, it only
 * ignores pinned vmas, and not objects where the backing storage itself is
 * pinned. Hence obj->pages_pin_count does not protect against eviction.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int
i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
                         int min_size, unsigned alignment, unsigned cache_level,
                         unsigned long start, unsigned long end,
                         unsigned flags)
{
        struct list_head eviction_list, unwind_list;
        struct i915_vma *vma;
        int ret = 0;
        int pass = 0;

        trace_i915_gem_evict(dev, min_size, alignment, flags);

        /*
         * The goal is to evict objects and amalgamate space in LRU order.
         * The oldest idle objects reside on the inactive list, which is in
         * retirement order. The next objects to retire are those on the (per
         * ring) active list that do not have an outstanding flush. Once the
         * hardware reports completion (the seqno is updated after the
         * batchbuffer has been finished) the clean buffer objects would
         * be retired to the inactive list. Any dirty objects would be added
         * to the tail of the flushing list. So after processing the clean
         * active objects we need to emit a MI_FLUSH to retire the flushing
         * list, hence the retirement order of the flushing list is in
         * advance of the dirty objects on the active lists.
         *
         * The retirement sequence is thus:
         * 1. Inactive objects (already retired)
         * 2. Clean active objects
         * 3. Flushing list
         * 4. Dirty active objects.
         *
         * On each list, the oldest objects lie at the HEAD with the freshest
         * object on the TAIL.
         */

        INIT_LIST_HEAD(&unwind_list);
        if (start != 0 || end != vm->total) {
                drm_mm_init_scan_with_range(&vm->mm, min_size,
                                            alignment, cache_level,
                                            start, end);
        } else {
                drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
        }

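        /* drm_mm scan protocol: blocks are speculatively added to the scan
         * until they coalesce into a large-enough hole; afterwards every
         * block must be removed again (this code unwinds in LIFO order),
         * and only the blocks that drm_mm_scan_remove_block() reports as
         * forming part of the hole are actually evicted.
         */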
search_again:
        /* First see if there is a large enough contiguous idle region... */
        list_for_each_entry(vma, &vm->inactive_list, vm_link) {
                if (mark_free(vma, &unwind_list))
                        goto found;
        }

        if (flags & PIN_NONBLOCK)
                goto none;

        /* Now merge in the soon-to-be-expired objects... */
        list_for_each_entry(vma, &vm->active_list, vm_link) {
                if (mark_free(vma, &unwind_list))
                        goto found;
        }

none:
        /* Nothing found, clean up and bail out! */
        while (!list_empty(&unwind_list)) {
                vma = list_first_entry(&unwind_list,
                                       struct i915_vma,
                                       exec_list);
                ret = drm_mm_scan_remove_block(&vma->node);
                BUG_ON(ret);

                list_del_init(&vma->exec_list);
        }

        /* Can we unpin some objects such as idle hw contexts,
         * or pending flips?
         */
        if (flags & PIN_NONBLOCK)
                return -ENOSPC;

        /* Only idle the GPU and repeat the search once */
        if (pass++ == 0) {
                struct drm_i915_private *dev_priv = to_i915(dev);

                ret = switch_to_pinned_context(dev_priv);
                if (ret)
                        return ret;

                ret = i915_gem_wait_for_idle(dev_priv);
                if (ret)
                        return ret;

                i915_gem_retire_requests(dev_priv);
                goto search_again;
        }

        /* If we still have pending pageflip completions, drop
         * back to userspace to give our workqueues time to
         * acquire our locks and unpin the old scanouts.
         */
        return intel_has_pending_fb_unpin(dev) ? -EAGAIN : -ENOSPC;

found:
        /* drm_mm doesn't allow any other operations while scanning,
         * therefore store the to-be-evicted objects on a temporary list.
         */
        INIT_LIST_HEAD(&eviction_list);
        while (!list_empty(&unwind_list)) {
                vma = list_first_entry(&unwind_list,
                                       struct i915_vma,
                                       exec_list);
                if (drm_mm_scan_remove_block(&vma->node)) {
                        list_move(&vma->exec_list, &eviction_list);
                        drm_gem_object_reference(&vma->obj->base);
                        continue;
                }
                list_del_init(&vma->exec_list);
        }

        /* Unbinding will emit any required flushes */
        while (!list_empty(&eviction_list)) {
                struct drm_gem_object *obj;

                vma = list_first_entry(&eviction_list,
                                       struct i915_vma,
                                       exec_list);

                obj = &vma->obj->base;
                list_del_init(&vma->exec_list);
                if (ret == 0)
                        ret = i915_vma_unbind(vma);

                drm_gem_object_unreference(obj);
        }

        return ret;
}

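/* For illustration only: a sketch of the typical caller in the object
 * binding code ("obj", "size" and "flags" are assumed from that context
 * and are not defined in this file). The binder first tries to find a
 * hole, and only on failure evicts and then retries the search:
 *
 *	search_free:
 *		ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
 *							  size, alignment,
 *							  obj->cache_level,
 *							  start, end,
 *							  DRM_MM_SEARCH_DEFAULT,
 *							  DRM_MM_CREATE_DEFAULT);
 *		if (ret) {
 *			ret = i915_gem_evict_something(dev, vm, size, alignment,
 *						       obj->cache_level,
 *						       start, end, flags);
 *			if (ret == 0)
 *				goto search_free;
 *		}
 */
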
/**
 * i915_gem_evict_for_vma - Evict vmas overlapping a fixed range
 * @target: vma whose node describes the range to clear
 *
 * Walk the list of nodes in @target's address space and unbind every vma
 * overlapping the range that @target is about to occupy. Overlapping vmas
 * that are pinned cannot be evicted: returns -EBUSY if one is pinned for
 * some other use, -EINVAL if fixed (EXEC_OBJECT_PINNED) objects in the
 * same batch overlap, and -ENOSPC if we merely need to evict an
 * overlapping buffer belonging to the same batch first.
 */
int
i915_gem_evict_for_vma(struct i915_vma *target)
{
        struct drm_mm_node *node, *next;

        /* drm_mm keeps its nodes sorted by address, so we can skip nodes
         * that end before the target range and stop as soon as we walk
         * past its end.
         */
        list_for_each_entry_safe(node, next,
                                 &target->vm->mm.head_node.node_list,
                                 node_list) {
                struct i915_vma *vma;
                int ret;

                if (node->start + node->size <= target->node.start)
                        continue;
                if (node->start >= target->node.start + target->node.size)
                        break;

                vma = container_of(node, typeof(*vma), node);

                if (vma->pin_count) {
                        if (!vma->exec_entry || (vma->pin_count > 1))
                                /* Object is pinned for some other use */
                                return -EBUSY;

                        /* We need to evict a buffer in the same batch */
                        if (vma->exec_entry->flags & EXEC_OBJECT_PINNED)
                                /* Overlapping fixed objects in the same batch */
                                return -EINVAL;

                        return -ENOSPC;
                }

                ret = i915_vma_unbind(vma);
                if (ret)
                        return ret;
        }

        return 0;
}

/**
 * i915_gem_evict_vm - Evict all idle vmas from a vm
 * @vm: Address space to cleanse
 * @do_idle: Boolean directing whether to idle first.
 *
 * This function evicts all idle vmas from a vm. If all unpinned vmas should be
 * evicted, @do_idle needs to be set to true.
 *
 * This is used by the execbuf code as a last-ditch effort to defragment the
 * address space.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
{
        struct i915_vma *vma, *next;
        int ret;

        WARN_ON(!mutex_is_locked(&vm->dev->struct_mutex));
        trace_i915_gem_evict_vm(vm);

        if (do_idle) {
                struct drm_i915_private *dev_priv = to_i915(vm->dev);

                ret = switch_to_pinned_context(dev_priv);
                if (ret)
                        return ret;

                ret = i915_gem_wait_for_idle(dev_priv);
                if (ret)
                        return ret;

                i915_gem_retire_requests(dev_priv);

                WARN_ON(!list_empty(&vm->active_list));
        }

        list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link)
                if (vma->pin_count == 0)
                        WARN_ON(i915_vma_unbind(vma));

        return 0;
}
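
/* Illustrative only: a sketch of the last-resort pattern in the execbuf
 * reservation loop (the "retry" label is assumed from that caller, not
 * defined here). When piecemeal eviction keeps failing with -ENOSPC,
 * flush the entire address space once and retry the whole reservation:
 *
 *	ret = i915_gem_evict_vm(vm, true);
 *	if (ret)
 *		return ret;
 *	goto retry;
 */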