/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_trace.h"

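/*
 * mark_free - queue a vma for eviction
 *
 * Pinned vmas can never be evicted, and a vma already sitting on an
 * eviction list indicates a bookkeeping error. Otherwise the vma is
 * added to @unwind (so the scan can be rolled back later) and its node
 * is offered to the drm_mm scanner, which returns true once the blocks
 * collected so far would form a large enough hole.
 */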
static bool
mark_free(struct i915_vma *vma, struct list_head *unwind)
{
	if (vma->pin_count)
		return false;

	if (WARN_ON(!list_empty(&vma->exec_list)))
		return false;

	list_add(&vma->exec_list, unwind);
	return drm_mm_scan_add_block(&vma->node);
}

/**
 * i915_gem_evict_something - Evict vmas to make room for binding a new one
 * @dev: drm_device
 * @vm: address space to evict from
 * @min_size: size of the desired free space
 * @alignment: alignment constraint of the desired free space
 * @cache_level: cache_level for the desired space
 * @start: start (inclusive) of the range from which to evict objects
 * @end: end (exclusive) of the range from which to evict objects
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas until a free space satisfying the
 * requirements is found. Callers must check first whether any such hole exists
 * already before calling this function.
 *
 * This function is used by the object/vma binding code.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole could be freed up,
 * -EAGAIN if eviction is temporarily blocked on a pending pageflip, or
 * another negative error code from idling the GPU or unbinding a vma.
 */
int
i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
			 int min_size, unsigned alignment, unsigned cache_level,
			 unsigned long start, unsigned long end,
			 unsigned flags)
{
	struct list_head eviction_list, unwind_list;
	struct i915_vma *vma;
	int ret = 0;
	int pass = 0;

	trace_i915_gem_evict(dev, min_size, alignment, flags);

	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
	 * The oldest idle objects reside on the inactive list, which is in
	 * retirement order. The next objects to retire are those on the (per
	 * ring) active list that do not have an outstanding flush. Once the
	 * hardware reports completion (the seqno is updated after the
	 * batchbuffer has been finished) the clean buffer objects would
	 * be retired to the inactive list. Any dirty objects would be added
	 * to the tail of the flushing list. So after processing the clean
	 * active objects we need to emit a MI_FLUSH to retire the flushing
	 * list, hence the retirement order of the flushing list is in
	 * advance of the dirty objects on the active lists.
	 *
	 * The retirement sequence is thus:
	 * 1. Inactive objects (already retired)
	 * 2. Clean active objects
	 * 3. Flushing list
	 * 4. Dirty active objects.
	 *
	 * On each list, the oldest objects lie at the HEAD with the freshest
	 * object on the TAIL.
	 */

	INIT_LIST_HEAD(&unwind_list);
	if (start != 0 || end != vm->total) {
		drm_mm_init_scan_with_range(&vm->mm, min_size,
					    alignment, cache_level,
					    start, end);
	} else
		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);

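	/*
	 * Note: while a drm_mm scan is in progress no other operation may
	 * be performed on the drm_mm, and every node offered to the scan
	 * via drm_mm_scan_add_block() must be handed back through
	 * drm_mm_scan_remove_block() before anything is actually evicted.
	 * Both exit paths below therefore unwind through unwind_list first.
	 */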
search_again:
	/* First see if there is a large enough contiguous idle region... */
	list_for_each_entry(vma, &vm->inactive_list, mm_list) {
		if (mark_free(vma, &unwind_list))
			goto found;
	}

	if (flags & PIN_NONBLOCK)
		goto none;

	/* Now merge in the soon-to-be-expired objects... */
	list_for_each_entry(vma, &vm->active_list, mm_list) {
		if (mark_free(vma, &unwind_list))
			goto found;
	}

none:
	/* Nothing found, clean up and bail out! */
	while (!list_empty(&unwind_list)) {
		vma = list_first_entry(&unwind_list,
				       struct i915_vma,
				       exec_list);
		ret = drm_mm_scan_remove_block(&vma->node);
		BUG_ON(ret);

		list_del_init(&vma->exec_list);
	}

	/* Can we unpin some objects such as idle hw contexts,
	 * or pending flips?
	 */
	if (flags & PIN_NONBLOCK)
		return -ENOSPC;

	/* Only idle the GPU and repeat the search once */
	if (pass++ == 0) {
		ret = i915_gpu_idle(dev);
		if (ret)
			return ret;

		i915_gem_retire_requests(dev);
		goto search_again;
	}

	/* If we still have pending pageflip completions, drop
	 * back to userspace to give our workqueues time to
	 * acquire our locks and unpin the old scanouts.
	 */
	return intel_has_pending_fb_unpin(dev) ? -EAGAIN : -ENOSPC;

found:
	/* drm_mm doesn't allow any other operations while
	 * scanning, therefore store the to-be-evicted objects on a
	 * temporary list. */
	INIT_LIST_HEAD(&eviction_list);
	while (!list_empty(&unwind_list)) {
		vma = list_first_entry(&unwind_list,
				       struct i915_vma,
				       exec_list);
		if (drm_mm_scan_remove_block(&vma->node)) {
			list_move(&vma->exec_list, &eviction_list);
			drm_gem_object_reference(&vma->obj->base);
			continue;
		}
		list_del_init(&vma->exec_list);
	}

	/* Unbinding will emit any required flushes */
	while (!list_empty(&eviction_list)) {
		struct drm_gem_object *obj;
		vma = list_first_entry(&eviction_list,
				       struct i915_vma,
				       exec_list);

		obj = &vma->obj->base;
		list_del_init(&vma->exec_list);
		if (ret == 0)
			ret = i915_vma_unbind(vma);

		drm_gem_object_unreference(obj);
	}

	return ret;
}

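/*
 * Usage sketch (illustrative only; insert_node() is a hypothetical
 * placeholder, not a function in this file): a binder first probes for
 * a free hole and only falls back to eviction on -ENOSPC, e.g.
 *
 *	ret = insert_node(vm, &node, size, align, start, end);
 *	if (ret == -ENOSPC) {
 *		ret = i915_gem_evict_something(dev, vm, size, align,
 *					       cache_level, start, end,
 *					       flags);
 *		if (ret == 0)
 *			ret = insert_node(vm, &node, size, align,
 *					  start, end);
 *	}
 */
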
/**
 * i915_gem_evict_vm - Evict all idle vmas from a vm
 * @vm: Address space to cleanse
 * @do_idle: Boolean directing whether to idle first.
 *
 * This function evicts all idle vmas from a vm. If all unpinned vmas should be
 * evicted, @do_idle must be set to true.
 *
 * This is used by the execbuf code as a last-ditch effort to defragment the
 * address space.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 *
 * Returns: 0 on success, or a negative error code if idling the GPU failed.
 */
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
{
	struct i915_vma *vma, *next;
	int ret;

	WARN_ON(!mutex_is_locked(&vm->dev->struct_mutex));
	trace_i915_gem_evict_vm(vm);

	if (do_idle) {
		ret = i915_gpu_idle(vm->dev);
		if (ret)
			return ret;

		i915_gem_retire_requests(vm->dev);

		WARN_ON(!list_empty(&vm->active_list));
	}

	list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
		if (vma->pin_count == 0)
			WARN_ON(i915_vma_unbind(vma));

	return 0;
}

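/*
 * Usage sketch (illustrative only; reserve_all_buffers() is a
 * hypothetical placeholder): the execbuf reservation path can use this
 * as a last resort before giving up,
 *
 *	ret = reserve_all_buffers(vm, objects);
 *	if (ret == -ENOSPC) {
 *		ret = i915_gem_evict_vm(vm, true);
 *		if (ret == 0)
 *			ret = reserve_all_buffers(vm, objects);
 *	}
 */
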
/**
 * i915_gem_evict_everything - Try to evict all objects
 * @dev: Device to evict objects for
 *
 * This function tries to evict all gem objects from all address spaces. Used
 * by the shrinker as a last-ditch effort and for suspend, before releasing the
 * backing storage of all unbound objects.
 *
 * Returns: 0 on success, -ENOSPC if there is nothing to evict, or another
 * negative error code if idling the GPU failed.
 */
int
i915_gem_evict_everything(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm, *v;
	bool lists_empty = true;
	int ret;

	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
		lists_empty = (list_empty(&vm->inactive_list) &&
			       list_empty(&vm->active_list));
		if (!lists_empty)
			break; /* found a vm with something to evict */
	}

	if (lists_empty)
		return -ENOSPC;

	trace_i915_gem_evict_everything(dev);

	/* The gpu_idle will flush everything in the write domain to the
	 * active list. Then we must move everything off the active list
	 * with retire requests.
	 */
	ret = i915_gpu_idle(dev);
	if (ret)
		return ret;

	i915_gem_retire_requests(dev);

	/* Having flushed everything, unbind() should never raise an error */
	list_for_each_entry_safe(vm, v, &dev_priv->vm_list, global_link)
		WARN_ON(i915_gem_evict_vm(vm, false));

	return 0;
}
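
/*
 * Usage sketch (illustrative only; release_unbound_pages() is a
 * hypothetical placeholder): the shrinker and the suspend path first
 * unbind everything and can then drop the now-unbound backing pages,
 *
 *	ret = i915_gem_evict_everything(dev);
 *	if (ret == 0 || ret == -ENOSPC)
 *		release_unbound_pages(dev_priv);
 */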