/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"

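/*
 * Offer @obj to the drm_mm scanner as an eviction candidate. Pinned
 * objects can never be evicted; anything else is pushed onto the unwind
 * list so the scan can be rolled back later, and the return value
 * reports whether the scanner has now found a large enough hole.
 */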
static bool
mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
{
	if (obj->pin_count)
		return false;

	list_add(&obj->exec_list, unwind);
	return drm_mm_scan_add_block(&obj->gtt_space);
}

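/*
 * Scan the GTT for a contiguous hole of at least @min_size bytes that
 * satisfies @alignment and @cache_level, evicting unpinned objects to
 * create one. @mappable restricts the search to the mappable aperture;
 * @nonblocking considers only already-idle (inactive) objects.
 *
 * A minimal sketch of the expected caller pattern, with a hypothetical
 * try_to_bind() standing in for the real GTT binding step:
 *
 *	ret = try_to_bind(obj);
 *	if (ret == -ENOSPC) {
 *		ret = i915_gem_evict_something(dev, size, alignment,
 *					       level, mappable, false);
 *		if (ret == 0)
 *			ret = try_to_bind(obj);
 *	}
 *
 * Returns 0 once enough space has been freed, -ENOSPC when no suitable
 * candidates exist, or the first error raised while unbinding.
 */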
int
i915_gem_evict_something(struct drm_device *dev, int min_size,
			 unsigned alignment, unsigned cache_level,
			 bool mappable, bool nonblocking)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct list_head eviction_list, unwind_list;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	trace_i915_gem_evict(dev, min_size, alignment, mappable);

	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
	 * The oldest idle objects reside on the inactive list, which is in
	 * retirement order. The next objects to retire are those on the (per
	 * ring) active list that do not have an outstanding flush. Once the
	 * hardware reports completion (the seqno is updated after the
	 * batchbuffer has been finished) the clean buffer objects would
	 * be retired to the inactive list. Any dirty objects would be added
	 * to the tail of the flushing list. So after processing the clean
	 * active objects we need to emit a MI_FLUSH to retire the flushing
	 * list, hence the retirement order of the flushing list is in
	 * advance of the dirty objects on the active lists.
	 *
	 * The retirement sequence is thus:
	 * 1. Inactive objects (already retired)
	 * 2. Clean active objects
	 * 3. Flushing list
	 * 4. Dirty active objects.
	 *
	 * On each list, the oldest objects lie at the HEAD with the freshest
	 * object on the TAIL.
	 */

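	/* Prime the scanner with the size and placement constraints;
	 * mark_free() then feeds it candidates in retirement order until
	 * it reports a large enough hole.
	 */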
	INIT_LIST_HEAD(&unwind_list);
	if (mappable)
		drm_mm_init_scan_with_range(&vm->mm, min_size,
					    alignment, cache_level, 0,
					    dev_priv->gtt.mappable_end);
	else
		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);

	/* First see if there is a large enough contiguous idle region... */
	list_for_each_entry(obj, &vm->inactive_list, mm_list) {
		if (mark_free(obj, &unwind_list))
			goto found;
	}

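	/* Evicting active objects stalls on the GPU, so in the
	 * nonblocking case give up after the idle pass.
	 */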
	if (nonblocking)
		goto none;

	/* Now merge in the soon-to-be-expired objects... */
	list_for_each_entry(obj, &vm->active_list, mm_list) {
		if (mark_free(obj, &unwind_list))
			goto found;
	}

none:
	/* Nothing found, clean up and bail out! */
	while (!list_empty(&unwind_list)) {
		obj = list_first_entry(&unwind_list,
				       struct drm_i915_gem_object,
				       exec_list);

		ret = drm_mm_scan_remove_block(&obj->gtt_space);
		BUG_ON(ret);

		list_del_init(&obj->exec_list);
	}

	/* We expect the caller to unpin, evict all and try again, or give up.
	 * So calling i915_gem_evict_everything() is unnecessary.
	 */
	return -ENOSPC;

found:
	/* drm_mm doesn't allow any other operations while scanning,
	 * therefore store the to-be-evicted objects on a temporary list. */
	INIT_LIST_HEAD(&eviction_list);
	while (!list_empty(&unwind_list)) {
		obj = list_first_entry(&unwind_list,
				       struct drm_i915_gem_object,
				       exec_list);
		if (drm_mm_scan_remove_block(&obj->gtt_space)) {
			list_move(&obj->exec_list, &eviction_list);
			drm_gem_object_reference(&obj->base);
			continue;
		}
		list_del_init(&obj->exec_list);
	}

	/* Unbinding will emit any required flushes */
	while (!list_empty(&eviction_list)) {
		obj = list_first_entry(&eviction_list,
				       struct drm_i915_gem_object,
				       exec_list);
		if (ret == 0)
			ret = i915_gem_object_unbind(obj);

		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	return ret;
}

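/*
 * Flush and idle the GPU, retire all completed work, and then unbind
 * every unpinned object from the GTT: the heavyweight fallback when a
 * targeted eviction is not enough.
 */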
int
i915_gem_evict_everything(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct drm_i915_gem_object *obj, *next;
	bool lists_empty;
	int ret;

	lists_empty = (list_empty(&vm->inactive_list) &&
		       list_empty(&vm->active_list));
	if (lists_empty)
		return -ENOSPC;

	trace_i915_gem_evict_everything(dev);

	/* The gpu_idle will flush everything in the write domain to the
	 * active list. Then we must move everything off the active list
	 * with retire requests.
	 */
	ret = i915_gpu_idle(dev);
	if (ret)
		return ret;

	i915_gem_retire_requests(dev);

	/* Having flushed everything, unbind() should never raise an error */
	list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list)
		if (obj->pin_count == 0)
			WARN_ON(i915_gem_object_unbind(obj));

	return 0;
}