/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

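/*
 * Undo the reservations taken so far: put buffers that were taken off
 * their LRU list back, clear each entry's reserved state and wake up
 * anyone waiting on the buffer. Caller must hold the global LRU
 * spinlock.
 */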
static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		if (entry->removed) {
			ttm_bo_add_to_lru(bo);
			entry->removed = false;
		}
		entry->reserved = false;
		atomic_set(&bo->reserved, 0);
		wake_up_all(&bo->event_queue);
	}
}

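/*
 * Take every reserved buffer on the list off its LRU list, recording
 * in each entry how many list references the removal dropped so they
 * can be subtracted later by ttm_eu_list_ref_sub(). Caller must hold
 * the global LRU spinlock.
 */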
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		if (!entry->removed) {
			entry->put_count = ttm_bo_del_from_lru(bo);
			entry->removed = true;
		}
	}
}

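/*
 * Subtract the buffer object references recorded by
 * ttm_eu_del_from_lru_locked() and reset each entry's put_count.
 */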
static void ttm_eu_list_ref_sub(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (entry->put_count) {
			ttm_bo_list_ref_sub(bo, entry->put_count, true);
			entry->put_count = 0;
		}
	}
}

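/*
 * Release all reservations held on the buffers in @list, e.g. after a
 * failed validation, under the global LRU spinlock of the first
 * buffer's bo_global.
 */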
void ttm_eu_backoff_reservation(struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;
	spin_lock(&glob->lru_lock);
	ttm_eu_backoff_reservation_locked(list);
	spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

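/*
 * Illustrative driver-side flow (a sketch only; the my_driver_* helpers
 * and my_fence below are hypothetical, not part of TTM): the driver
 * builds a list of struct ttm_validate_buffer entries, reserves them
 * all, validates placements, and finally fences and unreserves them.
 * On any failure the whole list is backed off.
 *
 *	struct list_head val_list;
 *	struct ttm_validate_buffer *entry;
 *	int ret;
 *
 *	INIT_LIST_HEAD(&val_list);
 *	my_driver_collect_buffers(&val_list);   (fills entry->bo, links entry->head)
 *
 *	ret = ttm_eu_reserve_buffers(&val_list);
 *	if (ret)
 *		return ret;
 *
 *	list_for_each_entry(entry, &val_list, head) {
 *		ret = my_driver_validate(entry->bo);   (e.g. ttm_bo_validate())
 *		if (ret) {
 *			ttm_eu_backoff_reservation(&val_list);
 *			return ret;
 *		}
 *	}
 *
 *	my_driver_submit_commands(&val_list);
 *	ttm_eu_fence_buffer_objects(&val_list, my_fence);   (also unreserves)
 */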
int ttm_eu_reserve_buffers(struct list_head *list)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;
	uint32_t val_seq;

	if (list_empty(list))
		return 0;

	list_for_each_entry(entry, list, head) {
		entry->reserved = false;
		entry->put_count = 0;
		entry->removed = false;
	}

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

	spin_lock(&glob->lru_lock);
	val_seq = entry->bo->bdev->val_seq++;

retry:
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		/* already slowpath reserved? */
		if (entry->reserved)
			continue;

		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
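			/*
			 * The buffer is reserved by someone else. Take the
			 * entries reserved so far off the LRU, drop the LRU
			 * lock and retry this reservation, this time waiting
			 * for it to be released.
			 */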
			ttm_eu_del_from_lru_locked(list);
			spin_unlock(&glob->lru_lock);
			ret = ttm_bo_reserve_nolru(bo, true, false,
						   true, val_seq);
			spin_lock(&glob->lru_lock);
			if (!ret)
				break;

			if (unlikely(ret != -EAGAIN))
				goto err;

			/* fallthrough */
		case -EAGAIN:
			ttm_eu_backoff_reservation_locked(list);

			/*
			 * temporarily increase sequence number every retry,
			 * to prevent us from seeing our old reservation
			 * sequence when someone else reserved the buffer,
			 * but hasn't updated the seq_valid/seqno members yet.
			 */
			val_seq = entry->bo->bdev->val_seq++;

			spin_unlock(&glob->lru_lock);
			ttm_eu_list_ref_sub(list);
			ret = ttm_bo_reserve_slowpath_nolru(bo, true, val_seq);
			if (unlikely(ret != 0))
				return ret;
			spin_lock(&glob->lru_lock);
			entry->reserved = true;
			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
				ret = -EBUSY;
				goto err;
			}
			goto retry;
		default:
			goto err;
		}

		entry->reserved = true;
		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			ret = -EBUSY;
			goto err;
		}
	}

	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	ttm_eu_list_ref_sub(list);

	return 0;

err:
	ttm_eu_backoff_reservation_locked(list);
	spin_unlock(&glob->lru_lock);
	ttm_eu_list_ref_sub(list);
	return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

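/*
 * Attach @sync_obj as the new fence to every buffer on @list and
 * unreserve the buffers. The previous sync objects, if any, are
 * unreferenced after the fence and LRU locks have been dropped.
 */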
void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->glob;

	spin_lock(&glob->lru_lock);
	spin_lock(&bdev->fence_lock);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		entry->old_sync_obj = bo->sync_obj;
		bo->sync_obj = driver->sync_obj_ref(sync_obj);
		ttm_bo_unreserve_locked(bo);
		entry->reserved = false;
	}
	spin_unlock(&bdev->fence_lock);
	spin_unlock(&glob->lru_lock);

	list_for_each_entry(entry, list, head) {
		if (entry->old_sync_obj)
			driver->sync_obj_unref(&entry->old_sync_obj);
	}
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);