/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"

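/*
 * Add a vma to the current drm_mm eviction scan, skipping anything that
 * cannot be unbound (pinned objects, or vmas already queued on an
 * exec/unwind list). Returns true once drm_mm has gathered enough blocks
 * to satisfy the scan, at which point the caller can stop searching.
 */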
static bool
mark_free(struct i915_vma *vma, struct list_head *unwind)
{
	if (vma->obj->pin_count)
		return false;

	if (WARN_ON(!list_empty(&vma->exec_list)))
		return false;

	list_add(&vma->exec_list, unwind);
	return drm_mm_scan_add_block(&vma->node);
}

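/**
 * i915_gem_evict_something - Evict vmas to make room in an address space
 * @dev: drm device
 * @vm: address space from which to evict
 * @min_size: size of the desired hole
 * @alignment: alignment constraint of the desired hole
 * @cache_level: cache level for the desired hole (used for GTT coloring)
 * @mappable: if true the hole must lie in the CPU-mappable part of the GGTT
 * @nonblocking: if true only idle (inactive) vmas are considered
 *
 * Scans @vm in LRU order, marking vmas as eviction candidates until drm_mm
 * reports a large enough hole, then unbinds the chosen vmas. Returns 0 on
 * success, -ENOSPC if no suitable hole could be assembled, or another
 * negative error code if unbinding fails.
 */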
int
i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
			 int min_size, unsigned alignment, unsigned cache_level,
			 bool mappable, bool nonblocking)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct list_head eviction_list, unwind_list;
	struct i915_vma *vma;
	int ret = 0;

	trace_i915_gem_evict(dev, min_size, alignment, mappable);

	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
	 * The oldest idle objects reside on the inactive list, which is in
	 * retirement order. The next objects to retire are those on the (per
	 * ring) active list that do not have an outstanding flush. Once the
	 * hardware reports completion (the seqno is updated after the
	 * batchbuffer has been finished) the clean buffer objects would
	 * be retired to the inactive list. Any dirty objects would be added
	 * to the tail of the flushing list. So after processing the clean
	 * active objects we need to emit a MI_FLUSH to retire the flushing
	 * list, hence the retirement order of the flushing list is in
	 * advance of the dirty objects on the active lists.
	 *
	 * The retirement sequence is thus:
	 * 1. Inactive objects (already retired)
	 * 2. Clean active objects
	 * 3. Flushing list
	 * 4. Dirty active objects.
	 *
	 * On each list, the oldest objects lie at the HEAD with the freshest
	 * object on the TAIL.
	 */

	INIT_LIST_HEAD(&unwind_list);
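	/*
	 * A mappable request must be satisfied from the CPU-visible portion
	 * of the global GTT, so restrict the drm_mm scan to the range below
	 * dev_priv->gtt.mappable_end in that case.
	 */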
	if (mappable) {
		BUG_ON(!i915_is_ggtt(vm));
		drm_mm_init_scan_with_range(&vm->mm, min_size,
					    alignment, cache_level, 0,
					    dev_priv->gtt.mappable_end);
	} else
		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);

	/* First see if there is a large enough contiguous idle region... */
	list_for_each_entry(vma, &vm->inactive_list, mm_list) {
		if (mark_free(vma, &unwind_list))
			goto found;
	}

	if (nonblocking)
		goto none;

	/* Now merge in the soon-to-be-expired objects... */
	list_for_each_entry(vma, &vm->active_list, mm_list) {
		if (mark_free(vma, &unwind_list))
			goto found;
	}

none:
	/* Nothing found, clean up and bail out! */
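	/*
	 * drm_mm requires every block added to a scan to be removed again
	 * (via drm_mm_scan_remove_block) before the manager can be used for
	 * anything else, so walk the unwind list even though we are bailing.
	 */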
	while (!list_empty(&unwind_list)) {
		vma = list_first_entry(&unwind_list,
				       struct i915_vma,
				       exec_list);
		ret = drm_mm_scan_remove_block(&vma->node);
		BUG_ON(ret);

		list_del_init(&vma->exec_list);
	}

	/* We expect the caller to unpin, evict all and try again, or give up.
	 * So calling i915_gem_evict_vm() is unnecessary.
	 */
	return -ENOSPC;

found:
	/* drm_mm doesn't allow any other operations while scanning,
	 * therefore store the objects to be evicted on a temporary
	 * list. */
	INIT_LIST_HEAD(&eviction_list);
	while (!list_empty(&unwind_list)) {
		vma = list_first_entry(&unwind_list,
				       struct i915_vma,
				       exec_list);
		if (drm_mm_scan_remove_block(&vma->node)) {
			list_move(&vma->exec_list, &eviction_list);
			drm_gem_object_reference(&vma->obj->base);
			continue;
		}
		list_del_init(&vma->exec_list);
	}

	/* Unbinding will emit any required flushes */
	while (!list_empty(&eviction_list)) {
		struct drm_gem_object *obj;
		vma = list_first_entry(&eviction_list,
				       struct i915_vma,
				       exec_list);

		obj = &vma->obj->base;
		list_del_init(&vma->exec_list);
		if (ret == 0)
			ret = i915_vma_unbind(vma);

		drm_gem_object_unreference(obj);
	}

	return ret;
}

/**
 * i915_gem_evict_vm - Try to free up VM space
 *
 * @vm: Address space to evict from
 * @do_idle: Boolean directing whether to idle first.
 *
 * VM eviction is about freeing up virtual address space. If finer-grained
 * eviction is needed, see i915_gem_evict_something(). In terms of freeing up
 * actual system memory, this function may not accomplish the desired result.
 * An object may be shared across multiple address spaces, and this function
 * does not guarantee that those objects are freed.
 *
 * Using do_idle results in a more complete eviction because it retires and
 * inactivates the current BOs.
 */
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
{
	struct i915_vma *vma, *next;
	int ret;

	trace_i915_gem_evict_vm(vm);

	if (do_idle) {
		ret = i915_gpu_idle(vm->dev);
		if (ret)
			return ret;

		i915_gem_retire_requests(vm->dev);
	}

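	/* Unbind every unpinned vma left on the inactive list. */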
	list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
		if (vma->obj->pin_count == 0)
			WARN_ON(i915_vma_unbind(vma));

	return 0;
}

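/**
 * i915_gem_evict_everything - Completely evict all address spaces
 * @dev: drm device
 *
 * Idles the GPU, retires outstanding requests and then unbinds every
 * unpinned vma in every address space. Returns -ENOSPC if there was
 * nothing to evict.
 */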
int
i915_gem_evict_everything(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct i915_address_space *vm;
	bool lists_empty = true;
	int ret;

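	/* Check whether there is anything bound that we could evict. */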
	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
		lists_empty = (list_empty(&vm->inactive_list) &&
			       list_empty(&vm->active_list));
		if (!lists_empty)
			lists_empty = false;
	}

	if (lists_empty)
		return -ENOSPC;

	trace_i915_gem_evict_everything(dev);

	/* The gpu_idle will flush everything in the write domain to the
	 * active list. Then we must move everything off the active list
	 * with retire requests.
	 */
	ret = i915_gpu_idle(dev);
	if (ret)
		return ret;

	i915_gem_retire_requests(dev);

	/* Having flushed everything, unbind() should never raise an error */
	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
		WARN_ON(i915_gem_evict_vm(vm, false));

	return 0;
}