/* Copyright (c) 2008-2009, 2011-2016 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/msm_remote_spinlock.h>
#include <linux/slab.h>

#include <soc/qcom/smem.h>

/**
 * The local processor (APPS) is PID 0, but because 0 is reserved for an empty
 * lock, the value PID + 1 is used as the APPS token when writing to the lock.
 */
#define SPINLOCK_TOKEN_APPS 1

static int is_hw_lock_type;
static DEFINE_MUTEX(ops_init_lock);

struct spinlock_ops {
        void (*lock)(raw_remote_spinlock_t *lock);
        void (*unlock)(raw_remote_spinlock_t *lock);
        int (*trylock)(raw_remote_spinlock_t *lock);
        int (*release)(raw_remote_spinlock_t *lock, uint32_t pid);
        int (*owner)(raw_remote_spinlock_t *lock);
        void (*lock_rlock_id)(raw_remote_spinlock_t *lock, uint32_t tid);
        void (*unlock_rlock)(raw_remote_spinlock_t *lock);
        int (*get_hw_spinlocks_element)(raw_remote_spinlock_t *lock);
};

static struct spinlock_ops current_ops;

static int remote_spinlock_init_address(int id, _remote_spinlock_t *lock);

/* ldrex implementation ----------------------------------------------------- */
static char *ldrex_compatible_string = "qcom,ipc-spinlock-ldrex";

#ifdef CONFIG_ARM
static void __raw_remote_ex_spin_lock(raw_remote_spinlock_t *lock)
{
        unsigned long tmp;

        __asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
" strexeq %0, %2, [%1]\n"
" teqeq %0, #0\n"
" bne 1b"
        : "=&r" (tmp)
        : "r" (&lock->lock), "r" (SPINLOCK_TOKEN_APPS)
        : "cc");

        /*
         * Ensure the ordering of read/write operations to ensure the
         * proper ownership of the lock during the lock/unlock operations
         */
        smp_mb();
}

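/*
 * For reference, the LDREX/STREX sequence in __raw_remote_ex_spin_lock()
 * above behaves roughly like the following C-level acquire loop (an
 * illustration only, assuming a cmpxchg() on the 32-bit lock word; this is
 * not code used by the driver):
 *
 *        do {
 *                while (readl_relaxed(&lock->lock))
 *                        ;
 *        } while (cmpxchg(&lock->lock, 0, SPINLOCK_TOKEN_APPS) != 0);
 *        smp_mb();
 *
 * i.e. wait until the lock word reads 0 (free), atomically claim it with the
 * APPS token, and only then allow accesses in the critical section.
 */
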
static int __raw_remote_ex_spin_trylock(raw_remote_spinlock_t *lock)
{
        unsigned long tmp;

        __asm__ __volatile__(
" ldrex %0, [%1]\n"
" teq %0, #0\n"
" strexeq %0, %2, [%1]\n"
        : "=&r" (tmp)
        : "r" (&lock->lock), "r" (SPINLOCK_TOKEN_APPS)
        : "cc");

        if (tmp == 0) {
                /*
                 * Ensure the ordering of read/write operations to ensure the
                 * proper ownership of the lock during the lock/unlock
                 * operations
                 */
                smp_mb();
                return 1;
        }
        return 0;
}

static void __raw_remote_ex_spin_unlock(raw_remote_spinlock_t *lock)
{
        int lock_owner;

        /*
         * Ensure the ordering of read/write operations to ensure the
         * proper ownership of the lock during the lock/unlock operations
         */
        smp_mb();
        lock_owner = readl_relaxed(&lock->lock);
        if (lock_owner != SPINLOCK_TOKEN_APPS) {
                pr_err("%s: spinlock not owned by Apps (actual owner is %d)\n",
                                __func__, lock_owner);
        }

        __asm__ __volatile__(
" str %1, [%0]\n"
        :
        : "r" (&lock->lock), "r" (0)
        : "cc");
}
#else
static void __raw_remote_ex_spin_lock(raw_remote_spinlock_t *lock)
{
}

static int __raw_remote_ex_spin_trylock(raw_remote_spinlock_t *lock)
{
        return 0;
}

static void __raw_remote_ex_spin_unlock(raw_remote_spinlock_t *lock)
{
}
#endif /* CONFIG_ARM */
/* end ldrex implementation ------------------------------------------------- */

/* sfpb implementation ------------------------------------------------------ */
static uint32_t lock_count;
static phys_addr_t reg_base;
static uint32_t reg_size;
static uint32_t lock_offset; /* offset into the hardware block before lock 0 */
static uint32_t lock_size;

static void *hw_mutex_reg_base;
static DEFINE_MUTEX(hw_map_init_lock);
static int *hw_spinlocks;

static char *sfpb_compatible_string = "qcom,ipc-spinlock-sfpb";

static int init_hw_mutex(struct device_node *node)
{
        struct resource r;
        int rc;

        rc = of_address_to_resource(node, 0, &r);
        if (rc)
                BUG();

        rc = of_property_read_u32(node, "qcom,num-locks", &lock_count);
        if (rc)
                BUG();

        reg_base = r.start;
        reg_size = (uint32_t)(resource_size(&r));
        lock_offset = 0;
        lock_size = reg_size / lock_count;

        return 0;
}

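/*
 * For reference, a hypothetical devicetree node of the form parsed above
 * (the unit address, size and lock count here are made-up examples; real
 * values come from the target's dtsi):
 *
 *        hwmutex@fd484000 {
 *                compatible = "qcom,ipc-spinlock-sfpb";
 *                reg = <0xfd484000 0x2000>;
 *                qcom,num-locks = <32>;
 *        };
 *
 * init_hw_mutex() takes the register block from "reg" and the number of
 * locks from "qcom,num-locks"; each lock is then assumed to occupy
 * reg_size / lock_count bytes of that block.
 */
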
static void find_and_init_hw_mutex(void)
{
        struct device_node *node;

        node = of_find_compatible_node(NULL, NULL, sfpb_compatible_string);
        BUG_ON(node == NULL);
        init_hw_mutex(node);
        hw_mutex_reg_base = ioremap(reg_base, reg_size);
        BUG_ON(hw_mutex_reg_base == NULL);
        hw_spinlocks = kcalloc(lock_count, sizeof(int), GFP_KERNEL);
        BUG_ON(hw_spinlocks == NULL);
}

static int remote_spinlock_init_address_hw(int id, _remote_spinlock_t *lock)
{
        /*
         * Optimistic locking. Init only needs to be done once by the first
         * caller. After that, serializing inits between different callers
         * is unnecessary. The second check after the lock ensures init
         * wasn't previously completed by someone else before the lock could
         * be grabbed.
         */
        if (!hw_mutex_reg_base) {
                mutex_lock(&hw_map_init_lock);
                if (!hw_mutex_reg_base)
                        find_and_init_hw_mutex();
                mutex_unlock(&hw_map_init_lock);
        }

        if (id >= lock_count)
                return -EINVAL;

        *lock = hw_mutex_reg_base + lock_offset + id * lock_size;
        return 0;
}

static unsigned int remote_spinlock_get_lock_id(raw_remote_spinlock_t *lock)
{
        unsigned int id;

        BUG_ON((uintptr_t)lock < (uintptr_t)hw_mutex_reg_base);
        BUG_ON(((uintptr_t)lock - (uintptr_t)hw_mutex_reg_base) < lock_offset);

        id = (unsigned int)((uintptr_t)lock - (uintptr_t)hw_mutex_reg_base -
                        lock_offset) / lock_size;
        BUG_ON(id >= lock_count);
        return id;
}

static void __raw_remote_sfpb_spin_lock(raw_remote_spinlock_t *lock)
{
        int owner;
        unsigned int id = remote_spinlock_get_lock_id(lock);

        /*
         * Wait for other local processor task to release spinlock if it
         * already has the remote spinlock locked. This can only happen in
         * test cases since the local spinlock will prevent this when using the
         * public APIs.
         */
        while (readl_relaxed(lock) == SPINLOCK_TOKEN_APPS)
                ;

        /* acquire remote spinlock */
        do {
                writel_relaxed(SPINLOCK_TOKEN_APPS, lock);
                /*
                 * Ensure the ordering of read/write operations to ensure the
                 * proper ownership of the lock during the lock/unlock
                 * operations
                 */
                smp_mb();
                owner = readl_relaxed(lock);
                hw_spinlocks[id] = owner;
        } while (owner != SPINLOCK_TOKEN_APPS);
}

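/*
 * Note on the acquire loop above: the sfpb hardware mutex is expected to
 * latch a write only when the lock word is free, so acquisition is confirmed
 * by writing SPINLOCK_TOKEN_APPS and reading the word back. A read-back of
 * any other value means a remote master won the race, and the write is simply
 * retried. The last observed owner is cached in hw_spinlocks[id] so it can be
 * inspected later via _remote_spin_get_hw_spinlocks_element().
 */
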
static int __raw_remote_sfpb_spin_trylock(raw_remote_spinlock_t *lock)
{
        int owner;
        unsigned int id = remote_spinlock_get_lock_id(lock);
        /*
         * If the local processor owns the spinlock, return failure. This can
         * only happen in test cases since the local spinlock will prevent this
         * when using the public APIs.
         */
        if (readl_relaxed(lock) == SPINLOCK_TOKEN_APPS)
                return 0;

        writel_relaxed(SPINLOCK_TOKEN_APPS, lock);
        /*
         * Ensure the ordering of read/write operations to ensure the
         * proper ownership of the lock during the lock/unlock operations
         */
        smp_mb();
        owner = readl_relaxed(lock);
        hw_spinlocks[id] = owner;
        return owner == SPINLOCK_TOKEN_APPS;
}

static void __raw_remote_sfpb_spin_unlock(raw_remote_spinlock_t *lock)
{
        int lock_owner;

        lock_owner = readl_relaxed(lock);
        if (lock_owner != SPINLOCK_TOKEN_APPS) {
                pr_err("%s: spinlock not owned by Apps (actual owner is %d)\n",
                                __func__, lock_owner);
        }

        writel_relaxed(0, lock);
        /*
         * Ensure the ordering of read/write operations to ensure the
         * proper ownership of the lock during the lock/unlock operations
         */
        smp_mb();
}

static void __raw_remote_sfpb_spin_lock_rlock_id(raw_remote_spinlock_t *lock,
                                                 uint32_t tid)
{
        if (unlikely(!tid)) {
                pr_err("%s: unsupported rlock tid=0\n", __func__);
                BUG();
        }

        do {
                writel_relaxed(tid, lock);
                /*
                 * Ensure the ordering of read/write operations to ensure the
                 * proper ownership of the lock during the lock/unlock
                 * operations
                 */
                smp_mb();
        } while (readl_relaxed(lock) != tid);
}

static void __raw_remote_sfpb_spin_unlock_rlock(raw_remote_spinlock_t *lock)
{
        writel_relaxed(0, lock);
        /*
         * Ensure the ordering of read/write operations to ensure the
         * proper ownership of the lock during the lock/unlock operations
         */
        smp_mb();
}

static int __raw_remote_sfpb_get_hw_spinlocks_element(
                raw_remote_spinlock_t *lock)
{
        return hw_spinlocks[remote_spinlock_get_lock_id(lock)];
}

/* end sfpb implementation -------------------------------------------------- */

/* common spinlock API ------------------------------------------------------ */
/**
 * Release spinlock if it is owned by @pid.
 *
 * This is only to be used for situations where the processor owning
 * the spinlock has crashed and the spinlock must be released.
 *
 * @lock: lock structure
 * @pid: processor ID of processor to release
 */
static int __raw_remote_gen_spin_release(raw_remote_spinlock_t *lock,
                                         uint32_t pid)
{
        int ret = 1;

        /*
         * Since 0 is reserved for an empty lock and the PIDs start at 0, the
         * value PID + 1 is written to the lock.
         */
        if (readl_relaxed(&lock->lock) == (pid + 1)) {
                writel_relaxed(0, &lock->lock);
                /*
                 * Ensure the ordering of read/write operations to ensure the
                 * proper ownership of the lock during the lock/unlock
                 * operations
                 */
                wmb();
                ret = 0;
        }
        return ret;
}

/**
 * Return owner of the spinlock.
 *
 * @lock: pointer to lock structure
 * @returns: >= 0 owned PID; < 0 for error case
 *
 * Used for testing. PIDs are assumed to be 31 bits or less.
 */
static int __raw_remote_gen_spin_owner(raw_remote_spinlock_t *lock)
{
        int owner;

        /*
         * Ensure the ordering of read/write operations to ensure the
         * proper ownership of the lock during the lock/unlock operations
         */
        rmb();

        owner = readl_relaxed(&lock->lock);
        if (owner)
                return owner - 1;
        else
                return -ENODEV;
}


static int dt_node_is_valid(const struct device_node *node)
{
        const char *status;
        int statlen;

        status = of_get_property(node, "status", &statlen);
        if (status == NULL)
                return 1;

        if (statlen > 0) {
                if (!strcmp(status, "okay") || !strcmp(status, "ok"))
                        return 1;
        }

        return 0;
}

static void initialize_ops(void)
{
        struct device_node *node;

        /*
         * of_find_compatible_node() returns a valid pointer even if
         * the status property is "disabled", so the validity needs
         * to be checked
         */
        node = of_find_compatible_node(NULL, NULL, sfpb_compatible_string);
        if (node && dt_node_is_valid(node)) {
                current_ops.lock = __raw_remote_sfpb_spin_lock;
                current_ops.unlock = __raw_remote_sfpb_spin_unlock;
                current_ops.trylock = __raw_remote_sfpb_spin_trylock;
                current_ops.release = __raw_remote_gen_spin_release;
                current_ops.owner = __raw_remote_gen_spin_owner;
                current_ops.lock_rlock_id =
                                __raw_remote_sfpb_spin_lock_rlock_id;
                current_ops.unlock_rlock = __raw_remote_sfpb_spin_unlock_rlock;
                current_ops.get_hw_spinlocks_element =
                                __raw_remote_sfpb_get_hw_spinlocks_element;
                is_hw_lock_type = 1;
                return;
        }

        node = of_find_compatible_node(NULL, NULL, ldrex_compatible_string);
        if (node && dt_node_is_valid(node)) {
                current_ops.lock = __raw_remote_ex_spin_lock;
                current_ops.unlock = __raw_remote_ex_spin_unlock;
                current_ops.trylock = __raw_remote_ex_spin_trylock;
                current_ops.release = __raw_remote_gen_spin_release;
                current_ops.owner = __raw_remote_gen_spin_owner;
                is_hw_lock_type = 0;
                return;
        }

        current_ops.lock = __raw_remote_ex_spin_lock;
        current_ops.unlock = __raw_remote_ex_spin_unlock;
        current_ops.trylock = __raw_remote_ex_spin_trylock;
        current_ops.release = __raw_remote_gen_spin_release;
        current_ops.owner = __raw_remote_gen_spin_owner;
        is_hw_lock_type = 0;
        pr_warn("Falling back to LDREX remote spinlock implementation\n");
}

/**
 * Release all spinlocks owned by @pid.
 *
 * This is only to be used for situations where the processor owning
 * spinlocks has crashed and the spinlocks must be released.
 *
 * @pid - processor ID of processor to release
 */
static void remote_spin_release_all_locks(uint32_t pid, int count)
{
        int n;
        _remote_spinlock_t lock;

        if (pid >= REMOTE_SPINLOCK_NUM_PID) {
                pr_err("%s: Unsupported PID %d\n", __func__, pid);
                return;
        }

        for (n = 0; n < count; ++n) {
                if (remote_spinlock_init_address(n, &lock) == 0)
                        _remote_spin_release(&lock, pid);
        }
}

void _remote_spin_release_all(uint32_t pid)
{
        remote_spin_release_all_locks(pid, lock_count);
}

#define SMEM_SPINLOCK_COUNT 8
#define SMEM_SPINLOCK_ARRAY_SIZE (SMEM_SPINLOCK_COUNT * sizeof(uint32_t))

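/*
 * Going by the sizes above, the SMEM_SPINLOCK_ARRAY item is an array of
 * SMEM_SPINLOCK_COUNT (8) consecutive 32-bit lock words, i.e. a 32-byte item;
 * lock id 3, for example, is the fourth word, at byte offset 12.
 * remote_spinlock_init_address_smem() below simply returns the address of
 * entry "id" within that array.
 */
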
static int remote_spinlock_init_address_smem(int id, _remote_spinlock_t *lock)
{
        _remote_spinlock_t spinlock_start;

        if (id >= SMEM_SPINLOCK_COUNT)
                return -EINVAL;

        spinlock_start = smem_find(SMEM_SPINLOCK_ARRAY,
                                   SMEM_SPINLOCK_ARRAY_SIZE,
                                   0,
                                   SMEM_ANY_HOST_FLAG);
        if (spinlock_start == NULL)
                return -ENXIO;

        *lock = spinlock_start + id;

        lock_count = SMEM_SPINLOCK_COUNT;

        return 0;
}

static int remote_spinlock_init_address(int id, _remote_spinlock_t *lock)
{
        if (is_hw_lock_type)
                return remote_spinlock_init_address_hw(id, lock);
        else
                return remote_spinlock_init_address_smem(id, lock);
}

int _remote_spin_lock_init(remote_spinlock_id_t id, _remote_spinlock_t *lock)
{
        BUG_ON(id == NULL);

        /*
         * Optimistic locking. Init only needs to be done once by the first
         * caller. After that, serializing inits between different callers
         * is unnecessary. The second check after the lock ensures init
         * wasn't previously completed by someone else before the lock could
         * be grabbed.
         */
        if (!current_ops.lock) {
                mutex_lock(&ops_init_lock);
                if (!current_ops.lock)
                        initialize_ops();
                mutex_unlock(&ops_init_lock);
        }

        if (id[0] == 'S' && id[1] == ':') {
                /* Single-digit lock ID follows "S:" */
                BUG_ON(id[3] != '\0');

                return remote_spinlock_init_address((((uint8_t)id[2])-'0'),
                                lock);
        } else {
                return -EINVAL;
        }
}

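/*
 * Illustrative use of the low-level API implemented here (clients normally go
 * through the public remote_spin_* wrappers, which additionally hold a local
 * spinlock; the lock id "S:6" below is only an example):
 *
 *        _remote_spinlock_t lock;
 *
 *        if (_remote_spin_lock_init("S:6", &lock))
 *                return;
 *        _remote_spin_lock(&lock);
 *        ... access shared memory ...
 *        _remote_spin_unlock(&lock);
 */
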
/*
 * lock comes in as a pointer to a pointer to the lock location, so it must
 * be dereferenced and cast to the right type for the actual lock
 * implementation functions
 */
void _remote_spin_lock(_remote_spinlock_t *lock)
{
        if (unlikely(!current_ops.lock))
                BUG();
        current_ops.lock((raw_remote_spinlock_t *)(*lock));
}
EXPORT_SYMBOL(_remote_spin_lock);

void _remote_spin_unlock(_remote_spinlock_t *lock)
{
        if (unlikely(!current_ops.unlock))
                BUG();
        current_ops.unlock((raw_remote_spinlock_t *)(*lock));
}
EXPORT_SYMBOL(_remote_spin_unlock);

int _remote_spin_trylock(_remote_spinlock_t *lock)
{
        if (unlikely(!current_ops.trylock))
                BUG();
        return current_ops.trylock((raw_remote_spinlock_t *)(*lock));
}
EXPORT_SYMBOL(_remote_spin_trylock);

int _remote_spin_release(_remote_spinlock_t *lock, uint32_t pid)
{
        if (unlikely(!current_ops.release))
                BUG();
        return current_ops.release((raw_remote_spinlock_t *)(*lock), pid);
}
EXPORT_SYMBOL(_remote_spin_release);

int _remote_spin_owner(_remote_spinlock_t *lock)
{
        if (unlikely(!current_ops.owner))
                BUG();
        return current_ops.owner((raw_remote_spinlock_t *)(*lock));
}
EXPORT_SYMBOL(_remote_spin_owner);

void _remote_spin_lock_rlock_id(_remote_spinlock_t *lock, uint32_t tid)
{
        if (unlikely(!current_ops.lock_rlock_id))
                BUG();
        current_ops.lock_rlock_id((raw_remote_spinlock_t *)(*lock), tid);
}
EXPORT_SYMBOL(_remote_spin_lock_rlock_id);

void _remote_spin_unlock_rlock(_remote_spinlock_t *lock)
{
        if (unlikely(!current_ops.unlock_rlock))
                BUG();
        current_ops.unlock_rlock((raw_remote_spinlock_t *)(*lock));
}
EXPORT_SYMBOL(_remote_spin_unlock_rlock);

int _remote_spin_get_hw_spinlocks_element(_remote_spinlock_t *lock)
{
        return current_ops.get_hw_spinlocks_element(
                        (raw_remote_spinlock_t *)(*lock));
}
EXPORT_SYMBOL(_remote_spin_get_hw_spinlocks_element);

/* end common spinlock API -------------------------------------------------- */