/*
 * Tegra host1x Syncpoints
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
18
#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>

#include <trace/events/host1x.h>

#include "syncpt.h"
#include "dev.h"
#include "intr.h"
#include "debug.h"

#define SYNCPT_CHECK_PERIOD (2 * HZ)
#define MAX_STUCK_CHECK_COUNT 15

33static struct host1x_syncpt *_host1x_syncpt_alloc(struct host1x *host,
34 struct device *dev,
Arto Merilainenece66892013-05-29 13:26:07 +030035 bool client_managed)
Terje Bergstrom75471682013-03-22 16:34:01 +020036{
37 int i;
38 struct host1x_syncpt *sp = host->syncpt;
39 char *name;
40
41 for (i = 0; i < host->info->nb_pts && sp->name; i++, sp++)
42 ;
Arto Merilainenedeabfc2013-05-29 13:26:06 +030043
44 if (i >= host->info->nb_pts)
Terje Bergstrom75471682013-03-22 16:34:01 +020045 return NULL;
46
47 name = kasprintf(GFP_KERNEL, "%02d-%s", sp->id,
48 dev ? dev_name(dev) : NULL);
49 if (!name)
50 return NULL;
51
52 sp->dev = dev;
53 sp->name = name;
54 sp->client_managed = client_managed;
55
56 return sp;
57}
58
/*
 * host1x_syncpt_id() - return the hardware ID of a syncpoint
 */
u32 host1x_syncpt_id(struct host1x_syncpt *sp)
{
	return sp->id;
}
63
/*
 * Updates the value sent to hardware: atomically advance the cached
 * maximum (future) value of the syncpoint by @incrs and return the
 * new maximum.
 */
u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs)
{
	return (u32)atomic_add_return(incrs, &sp->max_val);
}
71
72 /*
73 * Write cached syncpoint and waitbase values to hardware.
74 */
75void host1x_syncpt_restore(struct host1x *host)
76{
77 struct host1x_syncpt *sp_base = host->syncpt;
78 u32 i;
79
80 for (i = 0; i < host1x_syncpt_nb_pts(host); i++)
81 host1x_hw_syncpt_restore(host, sp_base + i);
82 for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
83 host1x_hw_syncpt_restore_wait_base(host, sp_base + i);
84 wmb();
85}
86
87/*
88 * Update the cached syncpoint and waitbase values by reading them
89 * from the registers.
90 */
91void host1x_syncpt_save(struct host1x *host)
92{
93 struct host1x_syncpt *sp_base = host->syncpt;
94 u32 i;
95
96 for (i = 0; i < host1x_syncpt_nb_pts(host); i++) {
97 if (host1x_syncpt_client_managed(sp_base + i))
98 host1x_hw_syncpt_load(host, sp_base + i);
99 else
100 WARN_ON(!host1x_syncpt_idle(sp_base + i));
101 }
102
103 for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
104 host1x_hw_syncpt_load_wait_base(host, sp_base + i);
105}
106
107/*
108 * Updates the cached syncpoint value by reading a new value from the hardware
109 * register
110 */
111u32 host1x_syncpt_load(struct host1x_syncpt *sp)
112{
113 u32 val;
114 val = host1x_hw_syncpt_load(sp->host, sp);
115 trace_host1x_syncpt_load_min(sp->id, val);
116
117 return val;
118}
119
120/*
121 * Get the current syncpoint base
122 */
123u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp)
124{
125 u32 val;
126 host1x_hw_syncpt_load_wait_base(sp->host, sp);
127 val = sp->base_val;
128 return val;
129}
130
/*
 * Write a cpu syncpoint increment to the hardware, without touching
 * the cache. Caller is responsible for host being powered.
 */
void host1x_syncpt_cpu_incr(struct host1x_syncpt *sp)
{
	host1x_hw_syncpt_cpu_incr(sp->host, sp);
}
139
/*
 * Increment the syncpoint value from the cpu, keeping the cache in
 * sync: for client-managed syncpoints the cached maximum is bumped
 * alongside the hardware increment.
 */
void host1x_syncpt_incr(struct host1x_syncpt *sp)
{
	if (host1x_syncpt_client_managed(sp))
		host1x_syncpt_incr_max(sp, 1);

	host1x_syncpt_cpu_incr(sp);
}
149
Terje Bergstrom7ede0b02013-03-22 16:34:02 +0200150/*
151 * Updated sync point form hardware, and returns true if syncpoint is expired,
152 * false if we may need to wait
153 */
154static bool syncpt_load_min_is_expired(struct host1x_syncpt *sp, u32 thresh)
155{
156 host1x_hw_syncpt_load(sp->host, sp);
157 return host1x_syncpt_is_expired(sp, thresh);
158}
159
160/*
161 * Main entrypoint for syncpoint value waits.
162 */
163int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
164 u32 *value)
165{
166 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
167 void *ref;
168 struct host1x_waitlist *waiter;
169 int err = 0, check_count = 0;
170 u32 val;
171
172 if (value)
173 *value = 0;
174
175 /* first check cache */
176 if (host1x_syncpt_is_expired(sp, thresh)) {
177 if (value)
178 *value = host1x_syncpt_load(sp);
179 return 0;
180 }
181
182 /* try to read from register */
183 val = host1x_hw_syncpt_load(sp->host, sp);
184 if (host1x_syncpt_is_expired(sp, thresh)) {
185 if (value)
186 *value = val;
187 goto done;
188 }
189
190 if (!timeout) {
191 err = -EAGAIN;
192 goto done;
193 }
194
195 /* allocate a waiter */
196 waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
197 if (!waiter) {
198 err = -ENOMEM;
199 goto done;
200 }
201
202 /* schedule a wakeup when the syncpoint value is reached */
203 err = host1x_intr_add_action(sp->host, sp->id, thresh,
204 HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
205 &wq, waiter, &ref);
206 if (err)
207 goto done;
208
209 err = -EAGAIN;
210 /* Caller-specified timeout may be impractically low */
211 if (timeout < 0)
212 timeout = LONG_MAX;
213
214 /* wait for the syncpoint, or timeout, or signal */
215 while (timeout) {
216 long check = min_t(long, SYNCPT_CHECK_PERIOD, timeout);
217 int remain = wait_event_interruptible_timeout(wq,
218 syncpt_load_min_is_expired(sp, thresh),
219 check);
220 if (remain > 0 || host1x_syncpt_is_expired(sp, thresh)) {
221 if (value)
222 *value = host1x_syncpt_load(sp);
223 err = 0;
224 break;
225 }
226 if (remain < 0) {
227 err = remain;
228 break;
229 }
230 timeout -= check;
231 if (timeout && check_count <= MAX_STUCK_CHECK_COUNT) {
232 dev_warn(sp->host->dev,
233 "%s: syncpoint id %d (%s) stuck waiting %d, timeout=%ld\n",
234 current->comm, sp->id, sp->name,
235 thresh, timeout);
Terje Bergstrom62364512013-03-22 16:34:04 +0200236
237 host1x_debug_dump_syncpts(sp->host);
238 if (check_count == MAX_STUCK_CHECK_COUNT)
239 host1x_debug_dump(sp->host);
Terje Bergstrom7ede0b02013-03-22 16:34:02 +0200240 check_count++;
241 }
242 }
243 host1x_intr_put_ref(sp->host, sp->id, ref);
244
245done:
246 return err;
247}
248EXPORT_SYMBOL(host1x_syncpt_wait);
249
/*
 * Returns true if syncpoint is expired, false if we may need to wait
 */
bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh)
{
	u32 current_val;
	u32 future_val;

	/* pair with the updater's write barrier before reading the cache */
	smp_rmb();
	current_val = (u32)atomic_read(&sp->min_val);
	future_val = (u32)atomic_read(&sp->max_val);

	/* Note the use of unsigned arithmetic here (mod 1<<32).
	 *
	 * c = current_val = min_val = the current value of the syncpoint.
	 * t = thresh = the value we are checking
	 * f = future_val = max_val = the value c will reach when all
	 *     outstanding increments have completed.
	 *
	 * Note that c always chases f until it reaches f.
	 *
	 * Dtf = (f - t)
	 * Dtc = (c - t)
	 *
	 * Consider all cases:
	 *
	 *	A) .....c..t..f.....	Dtf < Dtc	need to wait
	 *	B) .....c.....f..t..	Dtf > Dtc	expired
	 *	C) ..t..c.....f.....	Dtf > Dtc	expired	(Dct very large)
	 *
	 * Any case where f==c: always expired (for any t).	Dtf == Dcf
	 * Any case where t==c: always expired (for any f).	Dtf >= Dtc (because Dtc==0)
	 * Any case where t==f!=c: always wait.			Dtf <  Dtc (because Dtf==0,
	 *							Dtc!=0)
	 *
	 * Other cases:
	 *
	 *	A) .....t..f..c.....	Dtf < Dtc	need to wait
	 *	A) .....f..c..t.....	Dtf < Dtc	need to wait
	 *	A) .....f..t..c.....	Dtf > Dtc	expired
	 *
	 * So:
	 *	Dtf >= Dtc implies EXPIRED	(return true)
	 *	Dtf <  Dtc implies WAIT		(return false)
	 *
	 * Note: If t is expired then we *cannot* wait on it. We would wait
	 * forever (hang the system).
	 *
	 * Note: do NOT get clever and remove the -thresh from both sides. It
	 * is NOT the same.
	 *
	 * For a client-managed sync point we cannot trust the cached maximum,
	 * so we fall back to a direct signed comparison against the current
	 * value instead.
	 */
	if (!host1x_syncpt_client_managed(sp))
		return future_val - thresh >= current_val - thresh;
	else
		return (s32)(current_val - thresh) >= 0;
}
308
/*
 * Remove a wait pointed to by @patch_addr by delegating to the
 * hardware-specific patch implementation.
 */
int host1x_syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr)
{
	return host1x_hw_syncpt_patch_wait(sp->host, sp, patch_addr);
}
314
Terje Bergstrom75471682013-03-22 16:34:01 +0200315int host1x_syncpt_init(struct host1x *host)
316{
317 struct host1x_syncpt *syncpt;
318 int i;
319
320 syncpt = devm_kzalloc(host->dev, sizeof(*syncpt) * host->info->nb_pts,
321 GFP_KERNEL);
322 if (!syncpt)
323 return -ENOMEM;
324
325 for (i = 0; i < host->info->nb_pts; ++i) {
326 syncpt[i].id = i;
327 syncpt[i].host = host;
328 }
329
330 host->syncpt = syncpt;
331
332 host1x_syncpt_restore(host);
333
Terje Bergstrom65793242013-03-22 16:34:03 +0200334 /* Allocate sync point to use for clearing waits for expired fences */
Arto Merilainenece66892013-05-29 13:26:07 +0300335 host->nop_sp = _host1x_syncpt_alloc(host, NULL, false);
Terje Bergstrom65793242013-03-22 16:34:03 +0200336 if (!host->nop_sp)
337 return -ENOMEM;
338
Terje Bergstrom75471682013-03-22 16:34:01 +0200339 return 0;
340}
341
342struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
Arto Merilainenece66892013-05-29 13:26:07 +0300343 bool client_managed)
Terje Bergstrom75471682013-03-22 16:34:01 +0200344{
345 struct host1x *host = dev_get_drvdata(dev->parent);
346 return _host1x_syncpt_alloc(host, dev, client_managed);
347}
348
349void host1x_syncpt_free(struct host1x_syncpt *sp)
350{
351 if (!sp)
352 return;
353
354 kfree(sp->name);
355 sp->dev = NULL;
356 sp->name = NULL;
Arto Merilainenece66892013-05-29 13:26:07 +0300357 sp->client_managed = false;
Terje Bergstrom75471682013-03-22 16:34:01 +0200358}
359
360void host1x_syncpt_deinit(struct host1x *host)
361{
362 int i;
363 struct host1x_syncpt *sp = host->syncpt;
364 for (i = 0; i < host->info->nb_pts; i++, sp++)
365 kfree(sp->name);
366}
367
/*
 * Number of syncpoints supported by this host1x instance.
 */
int host1x_syncpt_nb_pts(struct host1x *host)
{
	return host->info->nb_pts;
}
372
/*
 * Number of waitbases supported by this host1x instance.
 */
int host1x_syncpt_nb_bases(struct host1x *host)
{
	return host->info->nb_bases;
}
377
/*
 * Number of mlocks supported by this host1x instance.
 */
int host1x_syncpt_nb_mlocks(struct host1x *host)
{
	return host->info->nb_mlocks;
}
382
383struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id)
384{
385 if (host->info->nb_pts < id)
386 return NULL;
387 return host->syncpt + id;
388}