/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_trace.h"

static bool
mark_free(struct i915_vma *vma, struct list_head *unwind)
{
        if (vma->pin_count)
                return false;

        if (WARN_ON(!list_empty(&vma->exec_list)))
                return false;

        list_add(&vma->exec_list, unwind);
        return drm_mm_scan_add_block(&vma->node);
}

/**
 * i915_gem_evict_something - Evict vmas to make room for binding a new one
 * @dev: drm_device
 * @vm: address space to evict from
 * @min_size: size of the desired free space
 * @alignment: alignment constraint of the desired free space
 * @cache_level: cache_level for the desired space
 * @flags: control flags for the scan: PIN_MAPPABLE restricts the search to
 * the mappable portion of the GGTT, PIN_NONBLOCK forbids evicting vmas on
 * the active list.
 *
 * This function will try to evict vmas until a free space satisfying the
 * requirements is found. Callers must check first whether any such hole exists
 * already before calling this function.
 *
 * This function is used by the object/vma binding code.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int
i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
                         int min_size, unsigned alignment, unsigned cache_level,
                         unsigned flags)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct list_head eviction_list, unwind_list;
        struct i915_vma *vma;
        int ret = 0;
        int pass = 0;

        trace_i915_gem_evict(dev, min_size, alignment, flags);

        /*
         * The goal is to evict objects and amalgamate space in LRU order.
         * The oldest idle objects reside on the inactive list, which is in
         * retirement order. The next objects to retire are those on the (per
         * ring) active list that do not have an outstanding flush. Once the
         * hardware reports completion (the seqno is updated after the
         * batchbuffer has been finished) the clean buffer objects would
         * be retired to the inactive list. Any dirty objects would be added
         * to the tail of the flushing list. So after processing the clean
         * active objects we need to emit a MI_FLUSH to retire the flushing
         * list, hence the retirement order of the flushing list is in
         * advance of the dirty objects on the active lists.
         *
         * The retirement sequence is thus:
         * 1. Inactive objects (already retired)
         * 2. Clean active objects
         * 3. Flushing list
         * 4. Dirty active objects.
         *
         * On each list, the oldest objects lie at the HEAD with the freshest
         * object on the TAIL.
         */

        INIT_LIST_HEAD(&unwind_list);
        if (flags & PIN_MAPPABLE) {
                BUG_ON(!i915_is_ggtt(vm));
                drm_mm_init_scan_with_range(&vm->mm, min_size,
                                            alignment, cache_level, 0,
                                            dev_priv->gtt.mappable_end);
        } else
                drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);

search_again:
        /* First see if there is a large enough contiguous idle region... */
        list_for_each_entry(vma, &vm->inactive_list, mm_list) {
                if (mark_free(vma, &unwind_list))
                        goto found;
        }

        if (flags & PIN_NONBLOCK)
                goto none;

        /* Now merge in the soon-to-be-expired objects... */
        list_for_each_entry(vma, &vm->active_list, mm_list) {
                if (mark_free(vma, &unwind_list))
                        goto found;
        }

none:
        /* Nothing found, clean up and bail out! */
        while (!list_empty(&unwind_list)) {
                vma = list_first_entry(&unwind_list,
                                       struct i915_vma,
                                       exec_list);
                ret = drm_mm_scan_remove_block(&vma->node);
                BUG_ON(ret);

                list_del_init(&vma->exec_list);
        }

        /* Can we unpin some objects such as idle hw contexts,
         * or pending flips?
         */
        if (flags & PIN_NONBLOCK)
                return -ENOSPC;

        /* Only idle the GPU and repeat the search once */
        if (pass++ == 0) {
                ret = i915_gpu_idle(dev);
                if (ret)
                        return ret;

                i915_gem_retire_requests(dev);
                goto search_again;
        }

        /* If we still have pending pageflip completions, drop
         * back to userspace to give our workqueues time to
         * acquire our locks and unpin the old scanouts.
         */
        return intel_has_pending_fb_unpin(dev) ? -EAGAIN : -ENOSPC;

found:
        /* drm_mm doesn't allow any other operations while
         * scanning, therefore store the to-be-evicted objects on a
         * temporary list. */
        INIT_LIST_HEAD(&eviction_list);
        while (!list_empty(&unwind_list)) {
                vma = list_first_entry(&unwind_list,
                                       struct i915_vma,
                                       exec_list);
                if (drm_mm_scan_remove_block(&vma->node)) {
                        list_move(&vma->exec_list, &eviction_list);
                        drm_gem_object_reference(&vma->obj->base);
                        continue;
                }
                list_del_init(&vma->exec_list);
        }

        /* Unbinding will emit any required flushes */
        while (!list_empty(&eviction_list)) {
                struct drm_gem_object *obj;

                vma = list_first_entry(&eviction_list,
                                       struct i915_vma,
                                       exec_list);

                obj = &vma->obj->base;
                list_del_init(&vma->exec_list);
                if (ret == 0)
                        ret = i915_vma_unbind(vma);

                drm_gem_object_unreference(obj);
        }

        return ret;
}

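/*
 * Illustrative sketch, not code from this file: the typical caller in the
 * object binding path first searches for a free hole and only falls back to
 * eviction on failure, retrying the search afterwards. The helper and
 * argument names below are assumptions modelled on
 * i915_gem_object_bind_to_vm(), not a verbatim copy.
 *
 *      search_free:
 *              ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
 *                                                        size, alignment,
 *                                                        obj->cache_level,
 *                                                        0, gtt_max,
 *                                                        DRM_MM_SEARCH_DEFAULT);
 *              if (ret) {
 *                      ret = i915_gem_evict_something(dev, vm, size, alignment,
 *                                                     obj->cache_level, flags);
 *                      if (ret == 0)
 *                              goto search_free;
 *                      return ret;
 *              }
 */
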
/**
 * i915_gem_evict_vm - Evict all idle vmas from a vm
 * @vm: Address space to cleanse
 * @do_idle: Boolean directing whether to idle first.
 *
 * This function evicts all idle vmas from a vm. If all unpinned vmas should be
 * evicted, @do_idle needs to be set to true.
 *
 * This is used by the execbuf code as a last-ditch effort to defragment the
 * address space.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
{
        struct i915_vma *vma, *next;
        int ret;

        trace_i915_gem_evict_vm(vm);

        if (do_idle) {
                ret = i915_gpu_idle(vm->dev);
                if (ret)
                        return ret;

                i915_gem_retire_requests(vm->dev);
        }

        list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
                if (vma->pin_count == 0)
                        WARN_ON(i915_vma_unbind(vma));

        return 0;
}

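/*
 * Illustrative sketch, not code from this file: the execbuf reservation loop
 * treats i915_gem_evict_vm(vm, true) as its final fallback. Once -ENOSPC has
 * survived one retry, all pins taken so far are dropped and the whole address
 * space is cleansed before reserving again; the surrounding names are
 * assumptions modelled on i915_gem_execbuffer_reserve().
 *
 *      if (ret != -ENOSPC || retry++)
 *              return ret;
 *
 *      list_for_each_entry(vma, vmas, exec_list)
 *              i915_gem_execbuffer_unreserve_vma(vma);
 *
 *      ret = i915_gem_evict_vm(vm, true);
 *      if (ret)
 *              return ret;
 */
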
/**
 * i915_gem_evict_everything - Try to evict all objects
 * @dev: Device to evict objects for
 *
 * This function tries to evict all gem objects from all address spaces. Used
 * by the shrinker as a last-ditch effort and for suspend, before releasing the
 * backing storage of all unbound objects.
 */
int
i915_gem_evict_everything(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_address_space *vm;
        bool lists_empty = true;
        int ret;

        list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
                lists_empty = (list_empty(&vm->inactive_list) &&
                               list_empty(&vm->active_list));
                if (!lists_empty)
                        break;
        }

        if (lists_empty)
                return -ENOSPC;

        trace_i915_gem_evict_everything(dev);

        /* The gpu_idle will flush everything in the write domain to the
         * active list. Then we must move everything off the active list
         * with retire requests.
         */
        ret = i915_gpu_idle(dev);
        if (ret)
                return ret;

        i915_gem_retire_requests(dev);

        /* Having flushed everything, unbind() should never raise an error */
        list_for_each_entry(vm, &dev_priv->vm_list, global_link)
                WARN_ON(i915_gem_evict_vm(vm, false));

        return 0;
}
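
/*
 * Illustrative sketch, not code from this file: the shrinker's last-ditch
 * path pairs i915_gem_evict_everything() with a purge of the backing storage
 * of the now-unbound objects; modelled on i915_gem_shrink_all() in
 * i915_gem.c, which should be treated as an assumption here.
 *
 *      static unsigned long
 *      i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 *      {
 *              i915_gem_evict_everything(dev_priv->dev);
 *              return __i915_gem_shrink(dev_priv, LONG_MAX, false);
 *      }
 */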