/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"

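/*
 * Add the object to the unwind list and register its GTT block with the
 * drm_mm scanner, holding a reference so the object cannot disappear while
 * the scan is in progress. Returns true once the scanner has accumulated
 * enough blocks to satisfy the allocation.
 */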
static bool
mark_free(struct drm_i915_gem_object *obj_priv,
	  struct list_head *unwind)
{
	list_add(&obj_priv->evict_list, unwind);
	drm_gem_object_reference(&obj_priv->base);
	return drm_mm_scan_add_block(obj_priv->gtt_space);
}

int
i915_gem_evict_something(struct drm_device *dev, int min_size,
			 unsigned alignment, bool mappable)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct list_head eviction_list, unwind_list;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	i915_gem_retire_requests(dev);

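	/*
	 * A mappable request must be satisfied from the CPU-visible part of
	 * the aperture, i.e. within [0, gtt_mappable_end); otherwise any
	 * hole in the GTT will do.
	 */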
	/* Re-check for free space after retiring requests */
	if (mappable) {
		if (drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
						min_size, alignment, 0,
						dev_priv->mm.gtt_mappable_end,
						0))
			return 0;
	} else {
		if (drm_mm_search_free(&dev_priv->mm.gtt_space,
				       min_size, alignment, 0))
			return 0;
	}

	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
	 * The oldest idle objects reside on the inactive list, which is in
	 * retirement order. The next objects to retire are those on the
	 * (per-ring) active list that do not have an outstanding flush.
	 * Once the hardware reports completion (the seqno is updated after
	 * the batchbuffer has finished), the clean buffer objects are
	 * retired to the inactive list, while any dirty objects are added
	 * to the tail of the flushing list. So after processing the clean
	 * active objects we need to emit an MI_FLUSH to retire the flushing
	 * list, hence the flushing list retires ahead of the dirty objects
	 * on the active lists.
	 *
	 * The retirement sequence is thus:
	 * 1. Inactive objects (already retired)
	 * 2. Clean active objects
	 * 3. Flushing list
	 * 4. Dirty active objects.
	 *
	 * On each list, the oldest objects lie at the HEAD with the freshest
	 * object on the TAIL.
	 */

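	/*
	 * drm_mm scanning is transactional: every block added with
	 * drm_mm_scan_add_block() must be handed back through
	 * drm_mm_scan_remove_block() before any other drm_mm operation, so
	 * each candidate is also chained onto unwind_list. As list_add()
	 * prepends, walking that list later visits the blocks in reverse
	 * order of insertion, which is what the scan tear-down expects.
	 */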
	INIT_LIST_HEAD(&unwind_list);
	if (mappable)
		drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size,
					    alignment, 0,
					    dev_priv->mm.gtt_mappable_end);
	else
		drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);

	/* First see if there is a large enough contiguous idle region... */
	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
		if (mark_free(obj_priv, &unwind_list))
			goto found;
	}

	/* Now merge in the soon-to-be-expired objects... */
	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
		/* Does the object require an outstanding flush? */
		if (obj_priv->base.write_domain || obj_priv->pin_count)
			continue;

		if (mark_free(obj_priv, &unwind_list))
			goto found;
	}

	/* Next add anything with a pending flush (in order of retirement) */
	list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
		if (obj_priv->pin_count)
			continue;

		if (mark_free(obj_priv, &unwind_list))
			goto found;
	}
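
	/* And finally the dirty objects on the active lists (step 4 above) */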
	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
		if (!obj_priv->base.write_domain || obj_priv->pin_count)
			continue;

		if (mark_free(obj_priv, &unwind_list))
			goto found;
	}

	/* Nothing found, clean up and bail out! */
	list_for_each_entry(obj_priv, &unwind_list, evict_list) {
		ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
		BUG_ON(ret);
		drm_gem_object_unreference(&obj_priv->base);
	}

	/* We expect the caller to unpin, evict all and try again, or give up.
	 * So calling i915_gem_evict_everything() is unnecessary.
	 */
	return -ENOSPC;

found:
	/* drm_mm doesn't allow any other operations while
	 * scanning, so store the to-be-evicted objects on a
	 * temporary list. */
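	/*
	 * drm_mm_scan_remove_block() returns true for the blocks that make
	 * up the hole found by the scan: those are moved to eviction_list.
	 * Every other block survives in place and merely loses the extra
	 * reference taken in mark_free().
	 */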
	INIT_LIST_HEAD(&eviction_list);
	while (!list_empty(&unwind_list)) {
		obj_priv = list_first_entry(&unwind_list,
					    struct drm_i915_gem_object,
					    evict_list);
		if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
			list_move(&obj_priv->evict_list, &eviction_list);
			continue;
		}
		list_del(&obj_priv->evict_list);
		drm_gem_object_unreference(&obj_priv->base);
	}

	/* Unbinding will emit any required flushes */
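	/* After the first failure we keep draining the list so that every
	 * reference taken in mark_free() is dropped, but we preserve the
	 * first error for the caller.
	 */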
	while (!list_empty(&eviction_list)) {
		obj_priv = list_first_entry(&eviction_list,
					    struct drm_i915_gem_object,
					    evict_list);
		if (ret == 0)
			ret = i915_gem_object_unbind(&obj_priv->base);
		list_del(&obj_priv->evict_list);
		drm_gem_object_unreference(&obj_priv->base);
	}

	return ret;
}

int
i915_gem_evict_everything(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;
	bool lists_empty;

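	/* If every list is already empty there is nothing left to evict,
	 * so report that no space can be reclaimed.
	 */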
	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
		       list_empty(&dev_priv->mm.flushing_list) &&
		       list_empty(&dev_priv->render_ring.active_list) &&
		       list_empty(&dev_priv->bsd_ring.active_list) &&
		       list_empty(&dev_priv->blt_ring.active_list));
	if (lists_empty)
		return -ENOSPC;

	/* Flush everything (on to the inactive lists) and evict */
	ret = i915_gpu_idle(dev);
	if (ret)
		return ret;

	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));

	ret = i915_gem_evict_inactive(dev);
	if (ret)
		return ret;

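	/* With the GPU idled and the inactive list evicted, every object
	 * list should now be empty.
	 */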
	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
		       list_empty(&dev_priv->mm.flushing_list) &&
		       list_empty(&dev_priv->render_ring.active_list) &&
		       list_empty(&dev_priv->bsd_ring.active_list) &&
		       list_empty(&dev_priv->blt_ring.active_list));
	BUG_ON(!lists_empty);

	return 0;
}

/** Unbinds all inactive objects. */
int
i915_gem_evict_inactive(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

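	/* Each successful unbind removes the object from the head of the
	 * inactive list, so the loop makes progress until the list is
	 * empty or an unbind fails.
	 */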
	while (!list_empty(&dev_priv->mm.inactive_list)) {
		struct drm_gem_object *obj;
		int ret;

		obj = &list_first_entry(&dev_priv->mm.inactive_list,
					struct drm_i915_gem_object,
					mm_list)->base;

		ret = i915_gem_object_unbind(obj);
		if (ret != 0) {
			DRM_ERROR("Error unbinding object: %d\n", ret);
			return ret;
		}
	}

	return 0;
}