/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_trace.h"

static bool
mark_free(struct i915_vma *vma, struct list_head *unwind)
{
	if (vma->pin_count)
		return false;

	if (WARN_ON(!list_empty(&vma->exec_list)))
		return false;

	list_add(&vma->exec_list, unwind);
	return drm_mm_scan_add_block(&vma->node);
}

/**
 * i915_gem_evict_something - Evict vmas to make room for binding a new one
 * @dev: drm_device
 * @vm: address space to evict from
 * @min_size: size of the desired free space
 * @alignment: alignment constraint of the desired free space
 * @cache_level: cache_level for the desired space
 * @start: start (inclusive) of the range from which to evict objects
 * @end: end (exclusive) of the range from which to evict objects
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas until a free space satisfying the
 * requirements is found. Callers must check first whether any such hole exists
 * already before calling this function.
 *
 * This function is used by the object/vma binding code.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int
i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
			 int min_size, unsigned alignment, unsigned cache_level,
			 unsigned long start, unsigned long end,
			 unsigned flags)
{
	struct list_head eviction_list, unwind_list;
	struct i915_vma *vma;
	int ret = 0;
	int pass = 0;

	trace_i915_gem_evict(dev, min_size, alignment, flags);

	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
	 * The oldest idle objects reside on the inactive list, which is in
	 * retirement order. The next objects to retire are those on the (per
	 * ring) active list that do not have an outstanding flush. Once the
	 * hardware reports completion (the seqno is updated after the
	 * batchbuffer has been finished) the clean buffer objects would
	 * be retired to the inactive list. Any dirty objects would be added
	 * to the tail of the flushing list. So after processing the clean
	 * active objects we need to emit a MI_FLUSH to retire the flushing
	 * list, hence the retirement order of the flushing list is in
	 * advance of the dirty objects on the active lists.
	 *
	 * The retirement sequence is thus:
	 * 1. Inactive objects (already retired)
	 * 2. Clean active objects
	 * 3. Flushing list
	 * 4. Dirty active objects.
	 *
	 * On each list, the oldest objects lie at the HEAD with the freshest
	 * object on the TAIL.
	 */

	INIT_LIST_HEAD(&unwind_list);
	if (start != 0 || end != vm->total) {
		drm_mm_init_scan_with_range(&vm->mm, min_size,
					    alignment, cache_level,
					    start, end);
	} else
		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
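	/*
	 * A sketch of the drm_mm scan contract this function relies on
	 * (summarized from the drm_mm scan API, not part of the original
	 * file): once a scan is initialised, no other drm_mm operation may
	 * touch this manager, and every node added with
	 * drm_mm_scan_add_block() must be removed again with
	 * drm_mm_scan_remove_block() in the reverse order of addition before
	 * the scan ends. That is why mark_free() prepends to unwind_list
	 * with list_add() and the unwind loops below pop entries with
	 * list_first_entry(). In outline:
	 *
	 *	drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
	 *	list_for_each_entry(vma, lru, mm_list)
	 *		if (drm_mm_scan_add_block(&vma->node))
	 *			break;			// hole found
	 *	while (!list_empty(&unwind_list))	// LIFO unwind
	 *		if (drm_mm_scan_remove_block(&vma->node))
	 *			evict(vma);		// node lies in the hole
	 */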

search_again:
	/* First see if there is a large enough contiguous idle region... */
	list_for_each_entry(vma, &vm->inactive_list, mm_list) {
		if (mark_free(vma, &unwind_list))
			goto found;
	}

	if (flags & PIN_NONBLOCK)
		goto none;

	/* Now merge in the soon-to-be-expired objects... */
	list_for_each_entry(vma, &vm->active_list, mm_list) {
		if (mark_free(vma, &unwind_list))
			goto found;
	}

none:
	/* Nothing found, clean up and bail out! */
	while (!list_empty(&unwind_list)) {
		vma = list_first_entry(&unwind_list,
				       struct i915_vma,
				       exec_list);
		ret = drm_mm_scan_remove_block(&vma->node);
		BUG_ON(ret);

		list_del_init(&vma->exec_list);
	}

	/* Can we unpin some objects such as idle hw contexts,
	 * or pending flips?
	 */
	if (flags & PIN_NONBLOCK)
		return -ENOSPC;

	/* Only idle the GPU and repeat the search once */
	if (pass++ == 0) {
		ret = i915_gpu_idle(dev);
		if (ret)
			return ret;

		i915_gem_retire_requests(dev);
		goto search_again;
	}

	/* If we still have pending pageflip completions, drop
	 * back to userspace to give our workqueues time to
	 * acquire our locks and unpin the old scanouts.
	 */
	return intel_has_pending_fb_unpin(dev) ? -EAGAIN : -ENOSPC;

found:
	/* drm_mm doesn't allow any other operations while scanning,
	 * therefore store the objects to be evicted on a temporary list.
	 */
	INIT_LIST_HEAD(&eviction_list);
	while (!list_empty(&unwind_list)) {
		vma = list_first_entry(&unwind_list,
				       struct i915_vma,
				       exec_list);
		if (drm_mm_scan_remove_block(&vma->node)) {
			list_move(&vma->exec_list, &eviction_list);
			drm_gem_object_reference(&vma->obj->base);
			continue;
		}
		list_del_init(&vma->exec_list);
	}

	/* Unbinding will emit any required flushes */
	while (!list_empty(&eviction_list)) {
		struct drm_gem_object *obj;
		vma = list_first_entry(&eviction_list,
				       struct i915_vma,
				       exec_list);

		obj = &vma->obj->base;
		list_del_init(&vma->exec_list);
		if (ret == 0)
			ret = i915_vma_unbind(vma);

		drm_gem_object_unreference(obj);
	}

	return ret;
}
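
/*
 * Illustrative caller pattern (a simplified sketch of the binding code in
 * i915_gem.c, not verbatim; the error label is hypothetical): on allocation
 * failure the caller evicts and then retries its drm_mm allocation.
 * i915_gem_evict_something() only opens up a suitably sized and aligned
 * hole, it never inserts the new node itself:
 *
 *	search_free:
 *		ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
 *							  size, alignment,
 *							  obj->cache_level,
 *							  start, end,
 *							  DRM_MM_SEARCH_DEFAULT);
 *		if (ret) {
 *			ret = i915_gem_evict_something(dev, vm, size, alignment,
 *						       obj->cache_level,
 *						       start, end, flags);
 *			if (ret == 0)
 *				goto search_free;
 *			goto err_free_vma;
 *		}
 */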

/**
 * i915_gem_evict_vm - Evict all idle vmas from a vm
 * @vm: Address space to cleanse
 * @do_idle: Boolean directing whether to idle first.
 *
 * This function evicts all idle vmas from a vm. If all unpinned vmas should be
 * evicted, @do_idle needs to be set to true.
 *
 * This is used by the execbuf code as a last-ditch effort to defragment the
 * address space.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
{
	struct i915_vma *vma, *next;
	int ret;

	trace_i915_gem_evict_vm(vm);

	if (do_idle) {
		ret = i915_gpu_idle(vm->dev);
		if (ret)
			return ret;

		i915_gem_retire_requests(vm->dev);
	}

	list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
		if (vma->pin_count == 0)
			WARN_ON(i915_vma_unbind(vma));

	return 0;
}
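
/*
 * Usage sketch (simplified from the execbuffer reservation path, not
 * verbatim): when reserving all execbuf objects still fails after retiring
 * requests, execbuf unpins what it holds, wipes the whole address space
 * once, and retries:
 *
 *	ret = i915_gem_evict_vm(vm, true);
 *	if (ret)
 *		return ret;
 */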

/**
 * i915_gem_evict_everything - Try to evict all objects
 * @dev: Device to evict objects for
 *
 * This function tries to evict all gem objects from all address spaces. Used
 * by the shrinker as a last-ditch effort and for suspend, before releasing the
 * backing storage of all unbound objects.
 */
int
i915_gem_evict_everything(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm;
	bool lists_empty = true;
	int ret;

	/* lists_empty must only remain true if every vm is idle; the
	 * original loop kept overwriting it so that only the last vm
	 * counted, hence check each vm and stop at the first busy one.
	 */
	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
		if (!list_empty(&vm->inactive_list) ||
		    !list_empty(&vm->active_list)) {
			lists_empty = false;
			break;
		}
	}

	if (lists_empty)
		return -ENOSPC;

	trace_i915_gem_evict_everything(dev);

	/* The gpu_idle will flush everything in the write domain to the
	 * active list. Then we must move everything off the active list
	 * with retire requests.
	 */
	ret = i915_gpu_idle(dev);
	if (ret)
		return ret;

	i915_gem_retire_requests(dev);

	/* Having flushed everything, unbind() should never raise an error */
	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
		WARN_ON(i915_gem_evict_vm(vm, false));

	return 0;
}
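
/*
 * Usage sketch (a simplified rendition of the shrinker path assumed from
 * i915_gem.c of this era, not verbatim): under severe memory pressure the
 * shrinker first unbinds everything, then drops the backing storage of the
 * now-unbound objects:
 *
 *	i915_gem_evict_everything(dev_priv->dev);
 *	return __i915_gem_shrink(dev_priv, LONG_MAX, false);
 */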