/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>
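
/*
 * Walk @list backwards, starting at the element before @entry, and
 * unreserve each buffer reserved so far. @entry itself is left reserved.
 */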
static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
					       struct ttm_validate_buffer *entry)
{
	list_for_each_entry_continue_reverse(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		__ttm_bo_unreserve(bo);
	}
}
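
/* Take each buffer on @list off its LRU list; caller holds lru_lock. */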
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ttm_bo_del_from_lru(bo);
	}
}
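
/*
 * Undo a successful ttm_eu_reserve_buffers(): put each buffer back on
 * its LRU list, unreserve it and, if a ticket was used, end the ww
 * acquire context.
 */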
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

	spin_lock(&glob->lru_lock);
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ttm_bo_add_to_lru(bo);
		__ttm_bo_unreserve(bo);
	}
	spin_unlock(&glob->lru_lock);

	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */
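
/*
 * Illustrative driver-side sequence (a sketch only: @val_list and @fence
 * are placeholder names, and per-driver validation, command submission
 * and error handling are omitted):
 *
 *	struct ww_acquire_ctx ticket;
 *	LIST_HEAD(dups);
 *	int ret;
 *
 *	ret = ttm_eu_reserve_buffers(&ticket, &val_list, true, &dups);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... validate buffers, submit the command stream, obtain @fence ...
 *	ttm_eu_fence_buffer_objects(&ticket, &val_list, fence);
 */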

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list, bool intr,
			   struct list_head *dups)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
		if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			__ttm_bo_unreserve(bo);

			ret = -EBUSY;
		} else if (ret == -EALREADY && dups) {
			struct ttm_validate_buffer *safe = entry;

			entry = list_prev_entry(entry, head);
			list_del(&safe->head);
			list_add(&safe->head, dups);
			continue;
		}

		if (!ret) {
			if (!entry->shared)
				continue;

			ret = reservation_object_reserve_shared(bo->resv);
			if (!ret)
				continue;
		}

		/* uh oh, we lost out, drop every reservation and try
		 * to only reserve this buffer, then start over if
		 * this succeeds.
		 */
		ttm_eu_backoff_reservation_reverse(list, entry);

		if (ret == -EDEADLK && intr) {
			ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
							       ticket);
		} else if (ret == -EDEADLK) {
			ww_mutex_lock_slow(&bo->resv->lock, ticket);
			ret = 0;
		}

		if (!ret && entry->shared)
			ret = reservation_object_reserve_shared(bo->resv);

		if (unlikely(ret != 0)) {
			if (ret == -EINTR)
				ret = -ERESTARTSYS;
			if (ticket) {
				ww_acquire_done(ticket);
				ww_acquire_fini(ticket);
			}
			return ret;
		}

		/* move this item to the front of the list,
		 * forces correct iteration of the loop without keeping track
		 */
		list_del(&entry->head);
		list_add(&entry->head, list);
	}

	if (ticket)
		ww_acquire_done(ticket);
	spin_lock(&glob->lru_lock);
	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
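
/*
 * After submission: attach @fence to each buffer's reservation object
 * (as a shared or exclusive fence, depending on entry->shared), put the
 * buffers back on their LRU lists, unreserve them and end the ww acquire
 * context if a ticket was used.
 */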
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list,
				 struct dma_fence *fence)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->glob;

	spin_lock(&glob->lru_lock);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		if (entry->shared)
			reservation_object_add_shared_fence(bo->resv, fence);
		else
			reservation_object_add_excl_fence(bo->resv, fence);
		ttm_bo_add_to_lru(bo);
		__ttm_bo_unreserve(bo);
	}
	spin_unlock(&glob->lru_lock);
	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);