/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"

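/*
 * Track the object on the caller's unwind list, taking a reference so it
 * cannot vanish while the scan is in progress, and register its GTT node
 * with the drm_mm eviction scan. Returns true once the scanner has
 * accumulated enough blocks to form a suitable hole.
 */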
static bool
mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
{
	list_add(&obj->exec_list, unwind);
	drm_gem_object_reference(&obj->base);
	return drm_mm_scan_add_block(obj->gtt_space);
}

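/**
 * i915_gem_evict_something - make room in the GTT by evicting objects in
 * LRU order until a hole of at least @min_size bytes at @alignment is
 * available, optionally restricted to the mappable aperture.
 *
 * Returns 0 on success (including when a suitable hole already exists),
 * -ENOSPC if no suitable victims could be found, or the error returned
 * while unbinding a victim.
 */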
int
i915_gem_evict_something(struct drm_device *dev, int min_size,
			 unsigned alignment, bool mappable)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct list_head eviction_list, unwind_list;
	struct drm_i915_gem_object *obj;
	int ret = 0;

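	/* Retire completed requests first so the object lists reflect what
	 * the GPU has actually finished with.
	 */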
	i915_gem_retire_requests(dev);

	/* Re-check for free space after retiring requests */
	if (mappable) {
		if (drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
						min_size, alignment, 0,
						dev_priv->mm.gtt_mappable_end,
						0))
			return 0;
	} else {
		if (drm_mm_search_free(&dev_priv->mm.gtt_space,
				       min_size, alignment, 0))
			return 0;
	}

	trace_i915_gem_evict(dev, min_size, alignment, mappable);

	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
	 * The oldest idle objects reside on the inactive list, which is in
	 * retirement order. The next objects to retire are those on the (per
	 * ring) active list that do not have an outstanding flush. Once the
	 * hardware reports completion (the seqno is updated after the
	 * batchbuffer has been finished) the clean buffer objects would
	 * be retired to the inactive list. Any dirty objects would be added
	 * to the tail of the flushing list. So after processing the clean
	 * active objects we need to emit a MI_FLUSH to retire the flushing
	 * list, hence the retirement order of the flushing list is in
	 * advance of the dirty objects on the active lists.
	 *
	 * The retirement sequence is thus:
	 *    1. Inactive objects (already retired)
	 *    2. Clean active objects
	 *    3. Flushing list
	 *    4. Dirty active objects.
	 *
	 * On each list, the oldest objects lie at the HEAD with the freshest
	 * object on the TAIL.
	 */

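	/* Begin a speculative drm_mm eviction scan: candidate objects are
	 * marked and kept on unwind_list until the scanner reports a hole
	 * of the requested size and alignment.
	 */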
	INIT_LIST_HEAD(&unwind_list);
	if (mappable)
		drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size,
					    alignment, 0,
					    dev_priv->mm.gtt_mappable_end);
	else
		drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);

	/* First see if there is a large enough contiguous idle region... */
	list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
		if (mark_free(obj, &unwind_list))
			goto found;
	}

	/* Now merge in the soon-to-be-expired objects... */
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		/* Does the object require an outstanding flush? */
		if (obj->base.write_domain || obj->pin_count)
			continue;

		if (mark_free(obj, &unwind_list))
			goto found;
	}

	/* Then add anything with a pending flush (in order of retirement) */
	list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
		if (obj->pin_count)
			continue;

		if (mark_free(obj, &unwind_list))
			goto found;
	}
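	/* Last resort: dirty objects that are still active, which will need
	 * both a flush and a wait before they can be unbound.
	 */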
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (!obj->base.write_domain || obj->pin_count)
			continue;

		if (mark_free(obj, &unwind_list))
			goto found;
	}

	/* Nothing found, clean up and bail out! */
	while (!list_empty(&unwind_list)) {
		obj = list_first_entry(&unwind_list,
				       struct drm_i915_gem_object,
				       exec_list);

		ret = drm_mm_scan_remove_block(obj->gtt_space);
		BUG_ON(ret);

		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	/* We expect the caller to unpin, evict all and try again, or give up.
	 * So calling i915_gem_evict_everything() is unnecessary.
	 */
	return -ENOSPC;

found:
	/* drm_mm doesn't allow any other operations while
	 * scanning, therefore store to be evicted objects on a
	 * temporary list. */
	INIT_LIST_HEAD(&eviction_list);
	while (!list_empty(&unwind_list)) {
		obj = list_first_entry(&unwind_list,
				       struct drm_i915_gem_object,
				       exec_list);
		if (drm_mm_scan_remove_block(obj->gtt_space)) {
			list_move(&obj->exec_list, &eviction_list);
			continue;
		}
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	/* Unbinding will emit any required flushes */
	while (!list_empty(&eviction_list)) {
		obj = list_first_entry(&eviction_list,
				       struct drm_i915_gem_object,
				       exec_list);
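		/* After the first unbind failure, keep walking the list only
		 * to drop the references taken in mark_free().
		 */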
		if (ret == 0)
			ret = i915_gem_object_unbind(obj);

		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	return ret;
}

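/**
 * i915_gem_evict_everything - idle the GPU so that every bound object
 * retires to the inactive list, then unbind them all (or only the
 * purgeable ones if @purgeable_only is set) to release as much GTT
 * space as possible.
 */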
int
i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;
	bool lists_empty;

	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
		       list_empty(&dev_priv->mm.flushing_list) &&
		       list_empty(&dev_priv->mm.active_list));
	if (lists_empty)
		return -ENOSPC;

	trace_i915_gem_evict_everything(dev, purgeable_only);

	/* Flush everything (on to the inactive lists) and evict */
	ret = i915_gpu_idle(dev);
	if (ret)
		return ret;

	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));

	return i915_gem_evict_inactive(dev, purgeable_only);
}

/** Unbinds all inactive objects; if @purgeable_only, only those marked purgeable. */
int
i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj, *next;

	list_for_each_entry_safe(obj, next,
				 &dev_priv->mm.inactive_list, mm_list) {
		if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
			int ret = i915_gem_object_unbind(obj);
			if (ret)
				return ret;
		}
	}

	return 0;
}