/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_trace.h"

static bool ggtt_is_idle(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id) {
		struct intel_timeline *tl;

		tl = &ggtt->base.timeline.engine[engine->id];
		if (i915_gem_active_isset(&tl->last_request))
			return false;
	}

	return true;
}

static bool
mark_free(struct i915_vma *vma, unsigned int flags, struct list_head *unwind)
{
	if (i915_vma_is_pinned(vma))
		return false;

	if (WARN_ON(!list_empty(&vma->exec_list)))
		return false;

	if (flags & PIN_NONFAULT && !list_empty(&vma->obj->userfault_link))
		return false;

	list_add(&vma->exec_list, unwind);
	return drm_mm_scan_add_block(&vma->node);
}

/**
 * i915_gem_evict_something - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @min_size: size of the desired free space
 * @alignment: alignment constraint of the desired free space
 * @cache_level: cache_level for the desired space
 * @start: start (inclusive) of the range from which to evict objects
 * @end: end (exclusive) of the range from which to evict objects
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas until a free space satisfying the
 * requirements is found. Callers must check first whether any such hole exists
 * already before calling this function.
 *
 * This function is used by the object/vma binding code.
 *
 * Since this function is only used to free up virtual address space, it only
 * ignores pinned vmas, and not objects where the backing storage itself is
 * pinned. Hence obj->pages_pin_count does not protect against eviction.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int
i915_gem_evict_something(struct i915_address_space *vm,
			 u64 min_size, u64 alignment,
			 unsigned cache_level,
			 u64 start, u64 end,
			 unsigned flags)
{
	struct drm_i915_private *dev_priv = vm->i915;
	struct list_head eviction_list;
	struct list_head *phases[] = {
		&vm->inactive_list,
		&vm->active_list,
		NULL,
	}, **phase;
	struct i915_vma *vma, *next;
	int ret;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	trace_i915_gem_evict(vm, min_size, alignment, flags);

	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
	 * The oldest idle objects reside on the inactive list, which is in
	 * retirement order. The next objects to retire are those in flight,
	 * on the active list, again in retirement order.
	 *
	 * The retirement sequence is thus:
	 * 1. Inactive objects (already retired)
	 * 2. Active objects (will stall on unbinding)
	 *
	 * On each list, the oldest objects lie at the HEAD with the freshest
	 * object on the TAIL.
	 */
	if (start != 0 || end != vm->total) {
		drm_mm_init_scan_with_range(&vm->mm, min_size,
					    alignment, cache_level,
					    start, end);
	} else
		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);

	if (flags & PIN_NONBLOCK)
		phases[1] = NULL;

search_again:
	INIT_LIST_HEAD(&eviction_list);
	phase = phases;
	do {
		list_for_each_entry(vma, *phase, vm_link)
			if (mark_free(vma, flags, &eviction_list))
				goto found;
	} while (*++phase);

	/* Nothing found, clean up and bail out! */
	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
		ret = drm_mm_scan_remove_block(&vma->node);
		BUG_ON(ret);

		INIT_LIST_HEAD(&vma->exec_list);
	}

	/* Can we unpin some objects such as idle hw contexts,
	 * or pending flips? But since only the GGTT has global entries
	 * such as scanouts, ringbuffers and contexts, we can skip the
	 * purge when inspecting per-process local address spaces.
	 */
	if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
		return -ENOSPC;

	if (ggtt_is_idle(dev_priv)) {
		/* If we still have pending pageflip completions, drop
		 * back to userspace to give our workqueues time to
		 * acquire our locks and unpin the old scanouts.
		 */
		return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
	}

	/* Not everything in the GGTT is tracked via vma (otherwise we
	 * could evict as required with minimal stalling) so we are forced
	 * to idle the GPU and explicitly retire outstanding requests in
	 * the hopes that we can then remove contexts and the like only
	 * bound by their active reference.
	 */
	ret = i915_gem_switch_to_kernel_context(dev_priv);
	if (ret)
		return ret;

	ret = i915_gem_wait_for_idle(dev_priv,
				     I915_WAIT_INTERRUPTIBLE |
				     I915_WAIT_LOCKED);
	if (ret)
		return ret;

	i915_gem_retire_requests(dev_priv);
	goto search_again;

found:
	/* drm_mm doesn't allow any other operations while
	 * scanning, therefore store to-be-evicted objects on a
	 * temporary list and take a reference for all before
	 * calling unbind (which may remove the active reference
	 * of any of our objects, thus corrupting the list).
	 */
	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
		if (drm_mm_scan_remove_block(&vma->node))
			__i915_vma_pin(vma);
		else
			list_del_init(&vma->exec_list);
	}

	/* Unbinding will emit any required flushes */
	while (!list_empty(&eviction_list)) {
		vma = list_first_entry(&eviction_list,
				       struct i915_vma,
				       exec_list);

		list_del_init(&vma->exec_list);
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = i915_vma_unbind(vma);
	}
	return ret;
}

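/*
 * Usage sketch (illustrative only, not part of this file): the typical
 * caller is the vma binding code, which retries its drm_mm allocation
 * once eviction has made room. Assuming a caller shaped like
 * i915_vma_insert() (local names here are placeholders), the pattern is
 * roughly:
 *
 *	search_free:
 *		ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
 *							  size, alignment,
 *							  obj->cache_level,
 *							  start, end,
 *							  DRM_MM_SEARCH_DEFAULT,
 *							  DRM_MM_CREATE_DEFAULT);
 *		if (ret) {
 *			ret = i915_gem_evict_something(vm, size, alignment,
 *						       obj->cache_level,
 *						       start, end, flags);
 *			if (ret == 0)
 *				goto search_free;
 *		}
 */
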
/**
 * i915_gem_evict_for_vma - Evict vmas to make room for binding a new one
 * @target: address space and range to evict for
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas that overlap the target node.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_for_vma(struct i915_vma *target, unsigned int flags)
{
	LIST_HEAD(eviction_list);
	struct drm_mm_node *node;
	u64 start = target->node.start;
	u64 end = start + target->node.size;
	struct i915_vma *vma, *next;
	bool check_color;
	int ret = 0;

	lockdep_assert_held(&target->vm->i915->drm.struct_mutex);
	trace_i915_gem_evict_vma(target, flags);

	check_color = target->vm->mm.color_adjust;
	if (check_color) {
		/* Expand search to cover neighbouring guard pages (or lack!) */
		if (start > target->vm->start)
			start -= 4096;
		if (end < target->vm->start + target->vm->total)
			end += 4096;
	}

	drm_mm_for_each_node_in_range(node, &target->vm->mm, start, end) {
		/* If we find any non-objects (!vma), we cannot evict them */
		if (node->color == I915_COLOR_UNEVICTABLE) {
			ret = -ENOSPC;
			break;
		}

		vma = container_of(node, typeof(*vma), node);

		/* If we are using coloring to insert guard pages between
		 * different cache domains within the address space, we have
		 * to check whether the objects on either side of our range
		 * abut and conflict. If they are in conflict, then we evict
		 * those as well to make room for our guard pages.
		 */
		if (check_color) {
			if (vma->node.start + vma->node.size == target->node.start) {
				if (vma->node.color == target->node.color)
					continue;
			}
			if (vma->node.start == target->node.start + target->node.size) {
				if (vma->node.color == target->node.color)
					continue;
			}
		}

		if (flags & PIN_NONBLOCK &&
		    (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))) {
			ret = -ENOSPC;
			break;
		}

		/* Overlap of objects in the same batch? */
		if (i915_vma_is_pinned(vma)) {
			ret = -ENOSPC;
			if (vma->exec_entry &&
			    vma->exec_entry->flags & EXEC_OBJECT_PINNED)
				ret = -EINVAL;
			break;
		}

		/* Never show fear in the face of dragons!
		 *
		 * We cannot directly remove this node from within this
		 * iterator and as with i915_gem_evict_something() we employ
		 * the vma pin_count in order to prevent the action of
		 * unbinding one vma from freeing (by dropping its active
		 * reference) another in our eviction list.
		 */
		__i915_vma_pin(vma);
		list_add(&vma->exec_list, &eviction_list);
	}

	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
		list_del_init(&vma->exec_list);
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = i915_vma_unbind(vma);
	}

	return ret;
}

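/*
 * Usage sketch (illustrative only): this path is taken when a vma must be
 * bound at an exact offset, e.g. an execbuf object with EXEC_OBJECT_PINNED
 * requested via PIN_OFFSET_FIXED. Assuming a caller shaped like the vma
 * binding code (local names are placeholders), the pattern is roughly:
 *
 *	vma->node.start = offset;
 *	vma->node.size = size;
 *	vma->node.color = obj->cache_level;
 *	ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
 *	if (ret) {
 *		ret = i915_gem_evict_for_vma(vma, flags);
 *		if (ret == 0)
 *			ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
 *	}
 */
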
/**
 * i915_gem_evict_vm - Evict all idle vmas from a vm
 * @vm: Address space to cleanse
 * @do_idle: Boolean directing whether to idle first.
 *
 * This function evicts all idle vmas from a vm. If all unpinned vmas should be
 * evicted, @do_idle needs to be set to true.
 *
 * This is used by the execbuf code as a last-ditch effort to defragment the
 * address space.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
{
	struct i915_vma *vma, *next;
	int ret;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	trace_i915_gem_evict_vm(vm);

	if (do_idle) {
		struct drm_i915_private *dev_priv = vm->i915;

		if (i915_is_ggtt(vm)) {
			ret = i915_gem_switch_to_kernel_context(dev_priv);
			if (ret)
				return ret;
		}

		ret = i915_gem_wait_for_idle(dev_priv,
					     I915_WAIT_INTERRUPTIBLE |
					     I915_WAIT_LOCKED);
		if (ret)
			return ret;

		i915_gem_retire_requests(dev_priv);
		WARN_ON(!list_empty(&vm->active_list));
	}

	list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link)
		if (!i915_vma_is_pinned(vma))
			WARN_ON(i915_vma_unbind(vma));

	return 0;
}
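
/*
 * Usage sketch (illustrative only): the execbuf reservation loop falls
 * back to a full eviction when per-object eviction cannot defragment
 * enough contiguous space. reserve_all_objects() and the retry flow below
 * are hypothetical stand-ins for that caller, along the lines of:
 *
 *	err = reserve_all_objects(eb);
 *	if (err == -ENOSPC && !retried) {
 *		err = i915_gem_evict_vm(eb->vm, true);
 *		if (err)
 *			return err;
 *		retried = true;
 *		goto retry;
 *	}
 */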