/*
 * Tegra host1x Syncpoints
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>

#include <trace/events/host1x.h>

#include "syncpt.h"
#include "dev.h"
#include "intr.h"
#include "debug.h"

#define SYNCPT_CHECK_PERIOD (2 * HZ)
#define MAX_STUCK_CHECK_COUNT 15
static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
						 struct device *dev,
						 unsigned long flags)
{
	int i;
	struct host1x_syncpt *sp = host->syncpt;
	char *name;

	/* find the first unused (still unnamed) syncpoint */
	for (i = 0; i < host->info->nb_pts && sp->name; i++, sp++)
		;

	if (i >= host->info->nb_pts)
		return NULL;

	name = kasprintf(GFP_KERNEL, "%02d-%s", sp->id,
			 dev ? dev_name(dev) : NULL);
	if (!name)
		return NULL;

	sp->dev = dev;
	sp->name = name;

	if (flags & HOST1X_SYNCPT_CLIENT_MANAGED)
		sp->client_managed = true;
	else
		sp->client_managed = false;

	return sp;
}

u32 host1x_syncpt_id(struct host1x_syncpt *sp)
{
	return sp->id;
}

/*
 * Updates the value sent to hardware.
 */
u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs)
{
	return (u32)atomic_add_return(incrs, &sp->max_val);
}
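
/*
 * Illustrative sketch (an assumption, not part of the original file): a
 * typical submit path reserves its increments up front and treats the
 * returned value as the fence threshold that marks job completion. The
 * names "sp" and "num_incrs" are hypothetical:
 *
 *	u32 fence = host1x_syncpt_incr_max(sp, num_incrs);
 *
 *	... queue work that increments sp num_incrs times ...
 *
 *	err = host1x_syncpt_wait(sp, fence, timeout, &value);
 */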

/*
 * Write cached syncpoint and waitbase values to hardware.
 */
void host1x_syncpt_restore(struct host1x *host)
{
	struct host1x_syncpt *sp_base = host->syncpt;
	u32 i;

	for (i = 0; i < host1x_syncpt_nb_pts(host); i++)
		host1x_hw_syncpt_restore(host, sp_base + i);
	for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
		host1x_hw_syncpt_restore_wait_base(host, sp_base + i);
	wmb();
}

/*
 * Update the cached syncpoint and waitbase values by reading them
 * from the registers.
 */
void host1x_syncpt_save(struct host1x *host)
{
	struct host1x_syncpt *sp_base = host->syncpt;
	u32 i;

	for (i = 0; i < host1x_syncpt_nb_pts(host); i++) {
		if (host1x_syncpt_client_managed(sp_base + i))
			host1x_hw_syncpt_load(host, sp_base + i);
		else
			WARN_ON(!host1x_syncpt_idle(sp_base + i));
	}

	for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
		host1x_hw_syncpt_load_wait_base(host, sp_base + i);
}

/*
 * Updates the cached syncpoint value by reading a new value from the
 * hardware register.
 */
u32 host1x_syncpt_load(struct host1x_syncpt *sp)
{
	u32 val;

	val = host1x_hw_syncpt_load(sp->host, sp);
	trace_host1x_syncpt_load_min(sp->id, val);

	return val;
}

/*
 * Get the current syncpoint base
 */
u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp)
{
	host1x_hw_syncpt_load_wait_base(sp->host, sp);

	return sp->base_val;
}

/*
 * Increment syncpoint value from CPU, updating cache
 */
int host1x_syncpt_incr(struct host1x_syncpt *sp)
{
	return host1x_hw_syncpt_cpu_incr(sp->host, sp);
}

/*
 * Update the sync point from hardware, and return true if the syncpoint
 * is expired, false if we may need to wait.
 */
static bool syncpt_load_min_is_expired(struct host1x_syncpt *sp, u32 thresh)
{
	host1x_hw_syncpt_load(sp->host, sp);

	return host1x_syncpt_is_expired(sp, thresh);
}

/*
 * Main entrypoint for syncpoint value waits.
 */
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
		       u32 *value)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	void *ref;
	struct host1x_waitlist *waiter;
	int err = 0, check_count = 0;
	u32 val;

	if (value)
		*value = 0;

	/* first check cache */
	if (host1x_syncpt_is_expired(sp, thresh)) {
		if (value)
			*value = host1x_syncpt_load(sp);
		return 0;
	}

	/* try to read from register */
	val = host1x_hw_syncpt_load(sp->host, sp);
	if (host1x_syncpt_is_expired(sp, thresh)) {
		if (value)
			*value = val;
		goto done;
	}

	if (!timeout) {
		err = -EAGAIN;
		goto done;
	}

	/* allocate a waiter */
	waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
	if (!waiter) {
		err = -ENOMEM;
		goto done;
	}

	/* schedule a wakeup when the syncpoint value is reached */
	err = host1x_intr_add_action(sp->host, sp->id, thresh,
				     HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
				     &wq, waiter, &ref);
	if (err)
		goto done;

	err = -EAGAIN;
	/* Caller-specified timeout may be impractically low */
	if (timeout < 0)
		timeout = LONG_MAX;

	/* wait for the syncpoint, or timeout, or signal */
	while (timeout) {
		long check = min_t(long, SYNCPT_CHECK_PERIOD, timeout);
		int remain = wait_event_interruptible_timeout(wq,
				syncpt_load_min_is_expired(sp, thresh),
				check);
		if (remain > 0 || host1x_syncpt_is_expired(sp, thresh)) {
			if (value)
				*value = host1x_syncpt_load(sp);
			err = 0;
			break;
		}
		if (remain < 0) {
			err = remain;
			break;
		}
		timeout -= check;
		if (timeout && check_count <= MAX_STUCK_CHECK_COUNT) {
			dev_warn(sp->host->dev,
				 "%s: syncpoint id %d (%s) stuck waiting %d, timeout=%ld\n",
				 current->comm, sp->id, sp->name,
				 thresh, timeout);

			host1x_debug_dump_syncpts(sp->host);
			if (check_count == MAX_STUCK_CHECK_COUNT)
				host1x_debug_dump(sp->host);
			check_count++;
		}
	}
	host1x_intr_put_ref(sp->host, sp->id, ref);

done:
	return err;
}
EXPORT_SYMBOL(host1x_syncpt_wait);
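
/*
 * Illustrative usage sketch (an assumption, not part of the original
 * file; "client->syncpt" and "fence" are hypothetical names): block
 * until the syncpoint reaches the fence value, capping the wait at
 * five seconds.
 *
 *	u32 completed;
 *	int err = host1x_syncpt_wait(client->syncpt, fence,
 *				     msecs_to_jiffies(5000), &completed);
 *
 * Here err is 0 on success, -EAGAIN if the wait timed out (also
 * returned immediately when timeout is 0 and the threshold has not
 * been reached), or a negative error if the sleep was interrupted by
 * a signal.
 */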

/*
 * Returns true if syncpoint is expired, false if we may need to wait
 */
bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh)
{
	u32 current_val;
	u32 future_val;

	smp_rmb();
	current_val = (u32)atomic_read(&sp->min_val);
	future_val = (u32)atomic_read(&sp->max_val);

	/* Note the use of unsigned arithmetic here (mod 1<<32).
	 *
	 * c = current_val = min_val = the current value of the syncpoint.
	 * t = thresh = the value we are checking
	 * f = future_val = max_val = the value c will reach when all
	 *     outstanding increments have completed.
	 *
	 * Note that c always chases f until it reaches f.
	 *
	 * Dtf = (f - t)
	 * Dtc = (c - t)
	 *
	 * Consider all cases:
	 *
	 *	A) .....c..t..f.....	Dtf < Dtc	need to wait
	 *	B) .....c.....f..t..	Dtf > Dtc	expired
	 *	C) ..t..c.....f.....	Dtf > Dtc	expired	(Dct very large)
	 *
	 * Any case where f==c: always expired (for any t).	Dtf == Dtc
	 * Any case where t==c: always expired (for any f).	Dtf >= Dtc (because Dtc==0)
	 * Any case where t==f!=c: always wait.			Dtf < Dtc (because Dtf==0,
	 *							Dtc!=0)
	 *
	 * Other cases (rotations of the three above):
	 *
	 *	A) .....t..f..c.....	Dtf < Dtc	need to wait
	 *	A) .....f..c..t.....	Dtf < Dtc	need to wait
	 *	B) .....f..t..c.....	Dtf > Dtc	expired
	 *
	 * So:
	 *	Dtf >= Dtc implies EXPIRED	(return true)
	 *	Dtf < Dtc implies WAIT		(return false)
	 *
	 * Note: If t is expired then we *cannot* wait on it. We would wait
	 * forever (hang the system).
	 *
	 * Note: do NOT get clever and remove the -thresh from both sides. It
	 * is NOT the same.
	 *
	 * If future value is zero, we have a client managed sync point. In
	 * that case we do a direct comparison.
	 */
	if (!host1x_syncpt_client_managed(sp))
		return future_val - thresh >= current_val - thresh;
	else
		return (s32)(current_val - thresh) >= 0;
}
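
/*
 * Worked example of the modular arithmetic above (an illustration, not
 * part of the original file). With 32-bit wraparound, take
 *
 *	c = current_val = 0xfffffffe
 *	t = thresh      = 0x00000001
 *	f = future_val  = 0x00000003	(the counter will wrap past zero)
 *
 * Then Dtf = f - t = 0x00000002 and Dtc = c - t = 0xfffffffd, so
 * Dtf < Dtc and the threshold has not expired: we must wait, even
 * though t < c numerically. A naive "c >= t" comparison would wrongly
 * report the threshold as expired.
 */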

/* remove a wait pointed to by patch_addr */
int host1x_syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr)
{
	return host1x_hw_syncpt_patch_wait(sp->host, sp, patch_addr);
}

int host1x_syncpt_init(struct host1x *host)
{
	struct host1x_syncpt *syncpt;
	int i;

	syncpt = devm_kzalloc(host->dev, sizeof(*syncpt) * host->info->nb_pts,
			      GFP_KERNEL);
	if (!syncpt)
		return -ENOMEM;

	for (i = 0; i < host->info->nb_pts; ++i) {
		syncpt[i].id = i;
		syncpt[i].host = host;
	}

	host->syncpt = syncpt;

	host1x_syncpt_restore(host);

	/* Allocate sync point to use for clearing waits for expired fences */
	host->nop_sp = host1x_syncpt_alloc(host, NULL, 0);
	if (!host->nop_sp)
		return -ENOMEM;

	return 0;
}

struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
					    unsigned long flags)
{
	struct host1x *host = dev_get_drvdata(dev->parent);

	return host1x_syncpt_alloc(host, dev, flags);
}
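
/*
 * Illustrative sketch (an assumption, not part of the original file): a
 * client driver would typically request a syncpoint when it is set up
 * and release it on teardown. "client->dev" is a hypothetical device
 * whose parent is the host1x instance; whether to pass
 * HOST1X_SYNCPT_CLIENT_MANAGED in flags depends on the client:
 *
 *	struct host1x_syncpt *sp;
 *
 *	sp = host1x_syncpt_request(client->dev, 0);
 *	if (!sp)
 *		return -ENOMEM;
 *
 *	... use sp ...
 *
 *	host1x_syncpt_free(sp);
 */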

void host1x_syncpt_free(struct host1x_syncpt *sp)
{
	if (!sp)
		return;

	kfree(sp->name);
	sp->dev = NULL;
	sp->name = NULL;
	sp->client_managed = false;
}

void host1x_syncpt_deinit(struct host1x *host)
{
	int i;
	struct host1x_syncpt *sp = host->syncpt;

	for (i = 0; i < host->info->nb_pts; i++, sp++)
		kfree(sp->name);
}

/*
 * Read max. It indicates how many operations there are in the queue,
 * either in a channel or in a software thread.
 */
u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
{
	smp_rmb();
	return (u32)atomic_read(&sp->max_val);
}

/*
 * Read min, which is a shadow of the current sync point value in hardware.
 */
u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
{
	smp_rmb();
	return (u32)atomic_read(&sp->min_val);
}

int host1x_syncpt_nb_pts(struct host1x *host)
{
	return host->info->nb_pts;
}

int host1x_syncpt_nb_bases(struct host1x *host)
{
	return host->info->nb_bases;
}

int host1x_syncpt_nb_mlocks(struct host1x *host)
{
	return host->info->nb_mlocks;
}

struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id)
{
	/* an id equal to nb_pts would index one past the end of the array */
	if (id >= host->info->nb_pts)
		return NULL;

	return host->syncpt + id;
}