/* Copyright (c) 2008-2009, 2011-2013 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/system.h>

#include <mach/msm_iomap.h>
#include <mach/remote_spinlock.h>
#include <mach/dal.h>
#include "smd_private.h"

#define SPINLOCK_PID_APPS 1

#define AUTO_MODE -1
#define DEKKERS_MODE 1
#define SWP_MODE 2
#define LDREX_MODE 3
#define SFPB_MODE 4

#if defined(CONFIG_MSM_REMOTE_SPINLOCK_DEKKERS) ||\
	defined(CONFIG_MSM_REMOTE_SPINLOCK_SWP) ||\
	defined(CONFIG_MSM_REMOTE_SPINLOCK_LDREX) ||\
	defined(CONFIG_MSM_REMOTE_SPINLOCK_SFPB)

#ifdef CONFIG_MSM_REMOTE_SPINLOCK_DEKKERS
/*
 * Use Dekker's algorithm when LDREX/STREX and SWP are unavailable for
 * shared memory
 */
#define CURRENT_MODE_INIT DEKKERS_MODE
#endif

#ifdef CONFIG_MSM_REMOTE_SPINLOCK_SWP
/* Use SWP-based locks when LDREX/STREX are unavailable for shared memory. */
#define CURRENT_MODE_INIT SWP_MODE
#endif

#ifdef CONFIG_MSM_REMOTE_SPINLOCK_LDREX
/* Use LDREX/STREX for shared memory locking, when available */
#define CURRENT_MODE_INIT LDREX_MODE
#endif

#ifdef CONFIG_MSM_REMOTE_SPINLOCK_SFPB
/* Use SFPB Hardware Mutex Registers */
#define CURRENT_MODE_INIT SFPB_MODE
#endif

#else
/* Use DT info to configure with a fallback to LDREX if DT is missing */
#define CURRENT_MODE_INIT AUTO_MODE
#endif

/* Note: the stray trailing semicolons in the CURRENT_MODE_INIT macros were
 * dropped; the macro is only used as an initializer, so the semicolon
 * belongs at the use site below. */
static int current_mode = CURRENT_MODE_INIT;

static int is_hw_lock_type;
static DEFINE_MUTEX(ops_init_lock);

struct spinlock_ops {
	void (*lock)(raw_remote_spinlock_t *lock);
	void (*unlock)(raw_remote_spinlock_t *lock);
	int (*trylock)(raw_remote_spinlock_t *lock);
	int (*release)(raw_remote_spinlock_t *lock, uint32_t pid);
	int (*owner)(raw_remote_spinlock_t *lock);
};

static struct spinlock_ops current_ops;

static int remote_spinlock_init_address(int id, _remote_spinlock_t *lock);

/* dekkers implementation --------------------------------------------------- */
#define DEK_LOCK_REQUEST 1
#define DEK_LOCK_YIELD (!DEK_LOCK_REQUEST)
#define DEK_YIELD_TURN_SELF 0
static void __raw_remote_dek_spin_lock(raw_remote_spinlock_t *lock)
{
	lock->dek.self_lock = DEK_LOCK_REQUEST;

	while (lock->dek.other_lock) {

		if (lock->dek.next_yield == DEK_YIELD_TURN_SELF)
			lock->dek.self_lock = DEK_LOCK_YIELD;

		while (lock->dek.other_lock)
			;

		lock->dek.self_lock = DEK_LOCK_REQUEST;
	}
	lock->dek.next_yield = DEK_YIELD_TURN_SELF;

	smp_mb();
}

static int __raw_remote_dek_spin_trylock(raw_remote_spinlock_t *lock)
{
	lock->dek.self_lock = DEK_LOCK_REQUEST;

	if (lock->dek.other_lock) {
		lock->dek.self_lock = DEK_LOCK_YIELD;
		return 0;
	}

	lock->dek.next_yield = DEK_YIELD_TURN_SELF;

	smp_mb();
	return 1;
}

static void __raw_remote_dek_spin_unlock(raw_remote_spinlock_t *lock)
{
	smp_mb();

	lock->dek.self_lock = DEK_LOCK_YIELD;
}

static int __raw_remote_dek_spin_release(raw_remote_spinlock_t *lock,
		uint32_t pid)
{
	return -EPERM;
}

static int __raw_remote_dek_spin_owner(raw_remote_spinlock_t *lock)
{
	return -EPERM;
}
/* end dekkers implementation ----------------------------------------------- */
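
/*
 * Editorial note: Dekker's algorithm is symmetric, so the remote processor
 * is expected to run the mirror image of the routines above against the
 * same shared word pair -- its self_lock is this processor's other_lock and
 * vice versa. A hypothetical peer-side lock, written from the peer's own
 * point of view (an assumption for illustration, not taken from the peer's
 * actual firmware; DEK_YIELD_TURN_PEER is the complementary value of
 * DEK_YIELD_TURN_SELF):
 *
 *	self_lock = DEK_LOCK_REQUEST;
 *	while (other_lock) {
 *		if (next_yield == DEK_YIELD_TURN_PEER)
 *			self_lock = DEK_LOCK_YIELD;
 *		while (other_lock)
 *			;
 *		self_lock = DEK_LOCK_REQUEST;
 *	}
 *	next_yield = DEK_YIELD_TURN_PEER;	// yield to Apps next time
 *	// memory barrier
 */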

#ifndef CONFIG_THUMB2_KERNEL
/* swp implementation ------------------------------------------------------- */
static void __raw_remote_swp_spin_lock(raw_remote_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	swp	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	smp_mb();
}

static int __raw_remote_swp_spin_trylock(raw_remote_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"	swp	%0, %2, [%1]\n"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	}
	return 0;
}

static void __raw_remote_swp_spin_unlock(raw_remote_spinlock_t *lock)
{
	int lock_owner;

	smp_mb();
	lock_owner = readl_relaxed(&lock->lock);
	if (lock_owner != SPINLOCK_PID_APPS) {
		pr_err("%s: spinlock not owned by Apps (actual owner is %d)\n",
				__func__, lock_owner);
	}

	__asm__ __volatile__(
"	str	%1, [%0]"
	:
	: "r" (&lock->lock), "r" (0)
	: "cc");
}
/* end swp implementation --------------------------------------------------- */
#endif

/* ldrex implementation ----------------------------------------------------- */
static char *ldrex_compatible_string = "qcom,ipc-spinlock-ldrex";

static void __raw_remote_ex_spin_lock(raw_remote_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]\n"
"	teqeq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (SPINLOCK_PID_APPS)
	: "cc");

	smp_mb();
}

static int __raw_remote_ex_spin_trylock(raw_remote_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]\n"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (SPINLOCK_PID_APPS)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	}
	return 0;
}

static void __raw_remote_ex_spin_unlock(raw_remote_spinlock_t *lock)
{
	int lock_owner;

	smp_mb();
	lock_owner = readl_relaxed(&lock->lock);
	if (lock_owner != SPINLOCK_PID_APPS) {
		pr_err("%s: spinlock not owned by Apps (actual owner is %d)\n",
				__func__, lock_owner);
	}

	__asm__ __volatile__(
"	str	%1, [%0]\n"
	:
	: "r" (&lock->lock), "r" (0)
	: "cc");
}
/* end ldrex implementation ------------------------------------------------- */
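
/*
 * Editorial sketch: the LDREX/STREX sequences above implement a
 * compare-and-swap acquire of the shared word. If both contenders were
 * Linux CPUs, the lock could be written as the hypothetical C below; the
 * real driver keeps the explicit asm because the remote owner is not
 * running Linux and only the raw word protocol is shared.
 */
#if 0	/* illustration only, not built */
static void __raw_remote_ex_spin_lock_sketch(raw_remote_spinlock_t *lock)
{
	/* spin until the word transitions 0 -> SPINLOCK_PID_APPS */
	while (cmpxchg(&lock->lock, 0, SPINLOCK_PID_APPS) != 0)
		;
	smp_mb();	/* acquire barrier, matching the asm version */
}
#endif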

/* sfpb implementation ------------------------------------------------------ */
#define SFPB_SPINLOCK_COUNT 8
#define MSM_SFPB_MUTEX_REG_BASE 0x01200600
#define MSM_SFPB_MUTEX_REG_SIZE (33 * 4)
#define SFPB_SPINLOCK_OFFSET 4
#define SFPB_SPINLOCK_SIZE 4

static uint32_t lock_count;
static phys_addr_t reg_base;
static uint32_t reg_size;
static uint32_t lock_offset; /* offset into the hardware block before lock 0 */
static uint32_t lock_size;

static void *hw_mutex_reg_base;
static DEFINE_MUTEX(hw_map_init_lock);

static char *sfpb_compatible_string = "qcom,ipc-spinlock-sfpb";

static int init_hw_mutex(struct device_node *node)
{
	struct resource r;
	int rc;

	rc = of_address_to_resource(node, 0, &r);
	if (rc)
		BUG();

	rc = of_property_read_u32(node, "qcom,num-locks", &lock_count);
	if (rc)
		BUG();

	reg_base = r.start;
	reg_size = (uint32_t)(resource_size(&r));
	lock_offset = 0;
	lock_size = reg_size / lock_count;

	return 0;
}

static void find_and_init_hw_mutex(void)
{
	struct device_node *node;

	node = of_find_compatible_node(NULL, NULL, sfpb_compatible_string);
	if (node) {
		init_hw_mutex(node);
	} else {
		lock_count = SFPB_SPINLOCK_COUNT;
		reg_base = MSM_SFPB_MUTEX_REG_BASE;
		reg_size = MSM_SFPB_MUTEX_REG_SIZE;
		lock_offset = SFPB_SPINLOCK_OFFSET;
		lock_size = SFPB_SPINLOCK_SIZE;
	}
	hw_mutex_reg_base = ioremap(reg_base, reg_size);
	BUG_ON(hw_mutex_reg_base == NULL);
}
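
/*
 * Editorial example of the devicetree node probed for above (the register
 * address and size are illustrative values borrowed from the legacy fixed
 * map, not taken from any particular target):
 *
 *	qcom,ipc-spinlock@1200600 {
 *		compatible = "qcom,ipc-spinlock-sfpb";
 *		reg = <0x01200600 0x84>;
 *		qcom,num-locks = <8>;
 *	};
 */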

static int remote_spinlock_init_address_hw(int id, _remote_spinlock_t *lock)
{
	/*
	 * Optimistic locking. Init only needs to be done once by the first
	 * caller. After that, serializing inits between different callers
	 * is unnecessary. The second check after the lock ensures init
	 * wasn't previously completed by someone else before the lock could
	 * be grabbed.
	 */
	if (!hw_mutex_reg_base) {
		mutex_lock(&hw_map_init_lock);
		if (!hw_mutex_reg_base)
			find_and_init_hw_mutex();
		mutex_unlock(&hw_map_init_lock);
	}

	if (id >= lock_count)
		return -EINVAL;

	*lock = hw_mutex_reg_base + lock_offset + id * lock_size;
	return 0;
}
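
/*
 * Worked example (editorial): under the legacy fixed map, lock_offset = 4
 * and lock_size = 4, so lock id 3 resolves to
 * hw_mutex_reg_base + 4 + 3 * 4, i.e. the ioremap()ed alias of physical
 * address 0x01200600 + 0x10 = 0x01200610.
 */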

static void __raw_remote_sfpb_spin_lock(raw_remote_spinlock_t *lock)
{
	do {
		writel_relaxed(SPINLOCK_PID_APPS, lock);
		smp_mb();
	} while (readl_relaxed(lock) != SPINLOCK_PID_APPS);
}

static int __raw_remote_sfpb_spin_trylock(raw_remote_spinlock_t *lock)
{
	writel_relaxed(SPINLOCK_PID_APPS, lock);
	smp_mb();
	return readl_relaxed(lock) == SPINLOCK_PID_APPS;
}

static void __raw_remote_sfpb_spin_unlock(raw_remote_spinlock_t *lock)
{
	int lock_owner;

	lock_owner = readl_relaxed(lock);
	if (lock_owner != SPINLOCK_PID_APPS) {
		pr_err("%s: spinlock not owned by Apps (actual owner is %d)\n",
				__func__, lock_owner);
	}

	writel_relaxed(0, lock);
	smp_mb();
}
/* end sfpb implementation -------------------------------------------------- */

/* common spinlock API ------------------------------------------------------ */
/**
 * Release spinlock if it is owned by @pid.
 *
 * This is only to be used for situations where the processor owning
 * the spinlock has crashed and the spinlock must be released.
 *
 * @lock: lock structure
 * @pid: processor ID of processor to release
 */
static int __raw_remote_gen_spin_release(raw_remote_spinlock_t *lock,
		uint32_t pid)
{
	int ret = 1;

	if (readl_relaxed(&lock->lock) == pid) {
		writel_relaxed(0, &lock->lock);
		wmb();
		ret = 0;
	}
	return ret;
}

/**
 * Return the owner of the spinlock.
 *
 * @lock: pointer to lock structure
 * @returns: >= 0 owner PID; < 0 for error case
 *
 * Used for testing. PIDs are assumed to be 31 bits or less.
 */
static int __raw_remote_gen_spin_owner(raw_remote_spinlock_t *lock)
{
	rmb();
	return readl_relaxed(&lock->lock);
}


static int dt_node_is_valid(const struct device_node *node)
{
	const char *status;
	int statlen;

	status = of_get_property(node, "status", &statlen);
	if (status == NULL)
		return 1;

	if (statlen > 0) {
		if (!strcmp(status, "okay") || !strcmp(status, "ok"))
			return 1;
	}

	return 0;
}

static void initialize_ops(void)
{
	struct device_node *node;

	switch (current_mode) {
	case DEKKERS_MODE:
		current_ops.lock = __raw_remote_dek_spin_lock;
		current_ops.unlock = __raw_remote_dek_spin_unlock;
		current_ops.trylock = __raw_remote_dek_spin_trylock;
		current_ops.release = __raw_remote_dek_spin_release;
		current_ops.owner = __raw_remote_dek_spin_owner;
		is_hw_lock_type = 0;
		break;
#ifndef CONFIG_THUMB2_KERNEL
	case SWP_MODE:
		current_ops.lock = __raw_remote_swp_spin_lock;
		current_ops.unlock = __raw_remote_swp_spin_unlock;
		current_ops.trylock = __raw_remote_swp_spin_trylock;
		current_ops.release = __raw_remote_gen_spin_release;
		current_ops.owner = __raw_remote_gen_spin_owner;
		is_hw_lock_type = 0;
		break;
#endif
	case LDREX_MODE:
		current_ops.lock = __raw_remote_ex_spin_lock;
		current_ops.unlock = __raw_remote_ex_spin_unlock;
		current_ops.trylock = __raw_remote_ex_spin_trylock;
		current_ops.release = __raw_remote_gen_spin_release;
		current_ops.owner = __raw_remote_gen_spin_owner;
		is_hw_lock_type = 0;
		break;
	case SFPB_MODE:
		current_ops.lock = __raw_remote_sfpb_spin_lock;
		current_ops.unlock = __raw_remote_sfpb_spin_unlock;
		current_ops.trylock = __raw_remote_sfpb_spin_trylock;
		current_ops.release = __raw_remote_gen_spin_release;
		current_ops.owner = __raw_remote_gen_spin_owner;
		is_hw_lock_type = 1;
		break;
	case AUTO_MODE:
		/*
		 * of_find_compatible_node() returns a valid pointer even if
		 * the status property is "disabled", so the validity needs
		 * to be checked
		 */
		node = of_find_compatible_node(NULL, NULL,
				sfpb_compatible_string);
		if (node && dt_node_is_valid(node)) {
			current_ops.lock = __raw_remote_sfpb_spin_lock;
			current_ops.unlock = __raw_remote_sfpb_spin_unlock;
			current_ops.trylock = __raw_remote_sfpb_spin_trylock;
			current_ops.release = __raw_remote_gen_spin_release;
			current_ops.owner = __raw_remote_gen_spin_owner;
			is_hw_lock_type = 1;
			break;
		}

		node = of_find_compatible_node(NULL, NULL,
				ldrex_compatible_string);
		if (node && dt_node_is_valid(node)) {
			current_ops.lock = __raw_remote_ex_spin_lock;
			current_ops.unlock = __raw_remote_ex_spin_unlock;
			current_ops.trylock = __raw_remote_ex_spin_trylock;
			current_ops.release = __raw_remote_gen_spin_release;
			current_ops.owner = __raw_remote_gen_spin_owner;
			is_hw_lock_type = 0;
			break;
		}

		current_ops.lock = __raw_remote_ex_spin_lock;
		current_ops.unlock = __raw_remote_ex_spin_unlock;
		current_ops.trylock = __raw_remote_ex_spin_trylock;
		current_ops.release = __raw_remote_gen_spin_release;
		current_ops.owner = __raw_remote_gen_spin_owner;
		is_hw_lock_type = 0;
		pr_warn("Falling back to LDREX remote spinlock implementation\n");
		break;
	default:
		BUG();
		break;
	}
}

/**
 * Release all spinlocks owned by @pid.
 *
 * This is only to be used for situations where the processor owning
 * spinlocks has crashed and the spinlocks must be released.
 *
 * @pid - processor ID of processor to release
 */
static void remote_spin_release_all_locks(uint32_t pid, int count)
{
	int n;
	_remote_spinlock_t lock;

	for (n = 0; n < count; ++n) {
		if (remote_spinlock_init_address(n, &lock) == 0)
			_remote_spin_release(&lock, pid);
	}
}

void _remote_spin_release_all(uint32_t pid)
{
	remote_spin_release_all_locks(pid, lock_count);
}
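
/*
 * Editorial usage sketch: a subsystem-restart handler that has detected a
 * remote-processor crash might release any locks the dead processor still
 * holds. MODEM_PID is a stand-in value; the real PID comes from the
 * platform's processor numbering, not from this file.
 */
#if 0	/* illustration only, not built */
#define MODEM_PID 2	/* hypothetical remote processor ID */
	_remote_spin_release_all(MODEM_PID);
#endif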

static int
remote_spinlock_dal_init(const char *chunk_name, _remote_spinlock_t *lock)
{
	void *dal_smem_start, *dal_smem_end;
	uint32_t dal_smem_size;
	struct dal_chunk_header *cur_header;

	if (!chunk_name)
		return -EINVAL;

	dal_smem_start = smem_get_entry(SMEM_DAL_AREA, &dal_smem_size);
	if (!dal_smem_start)
		return -ENXIO;

	dal_smem_end = dal_smem_start + dal_smem_size;

	/* Find first chunk header */
	cur_header = (struct dal_chunk_header *)
			(((uint32_t)dal_smem_start + (4095)) & ~4095);
	*lock = NULL;
	while (cur_header->size != 0
		&& ((uint32_t)(cur_header + 1) < (uint32_t)dal_smem_end)) {

		/* Check if chunk name matches */
		if (!strncmp(cur_header->name, chunk_name,
					DAL_CHUNK_NAME_LENGTH)) {
			*lock = (_remote_spinlock_t)&cur_header->lock;
			return 0;
		}
		cur_header = (void *)cur_header + cur_header->size;
	}

	pr_err("%s: DAL remote lock \"%s\" not found.\n", __func__,
		chunk_name);
	return -EINVAL;
}

#define SMEM_SPINLOCK_COUNT 8
#define SMEM_SPINLOCK_ARRAY_SIZE (SMEM_SPINLOCK_COUNT * sizeof(uint32_t))

static int remote_spinlock_init_address_smem(int id, _remote_spinlock_t *lock)
{
	_remote_spinlock_t spinlock_start;

	if (id >= SMEM_SPINLOCK_COUNT)
		return -EINVAL;

	spinlock_start = smem_alloc(SMEM_SPINLOCK_ARRAY,
				    SMEM_SPINLOCK_ARRAY_SIZE);
	if (spinlock_start == NULL)
		return -ENXIO;

	*lock = spinlock_start + id;

	lock_count = SMEM_SPINLOCK_COUNT;

	return 0;
}

static int remote_spinlock_init_address(int id, _remote_spinlock_t *lock)
{
	if (is_hw_lock_type)
		return remote_spinlock_init_address_hw(id, lock);
	else
		return remote_spinlock_init_address_smem(id, lock);
}

int _remote_spin_lock_init(remote_spinlock_id_t id, _remote_spinlock_t *lock)
{
	BUG_ON(id == NULL);

	/*
	 * Optimistic locking. Init only needs to be done once by the first
	 * caller. After that, serializing inits between different callers
	 * is unnecessary. The second check after the lock ensures init
	 * wasn't previously completed by someone else before the lock could
	 * be grabbed.
	 */
	if (!current_ops.lock) {
		mutex_lock(&ops_init_lock);
		if (!current_ops.lock)
			initialize_ops();
		mutex_unlock(&ops_init_lock);
	}

	if (id[0] == 'D' && id[1] == ':') {
		/* DAL chunk name starts after "D:" */
		return remote_spinlock_dal_init(&id[2], lock);
	} else if (id[0] == 'S' && id[1] == ':') {
		/* Single-digit lock ID follows "S:" */
		BUG_ON(id[3] != '\0');

		return remote_spinlock_init_address((((uint8_t)id[2])-'0'),
				lock);
	} else {
		return -EINVAL;
	}
}

/*
 * lock comes in as a pointer to a pointer to the lock location, so it must
 * be dereferenced and cast to the right type for the actual lock
 * implementation functions
 */
void _remote_spin_lock(_remote_spinlock_t *lock)
{
	if (unlikely(!current_ops.lock))
		BUG();
	current_ops.lock((raw_remote_spinlock_t *)(*lock));
}
EXPORT_SYMBOL(_remote_spin_lock);

void _remote_spin_unlock(_remote_spinlock_t *lock)
{
	if (unlikely(!current_ops.unlock))
		BUG();
	current_ops.unlock((raw_remote_spinlock_t *)(*lock));
}
EXPORT_SYMBOL(_remote_spin_unlock);

int _remote_spin_trylock(_remote_spinlock_t *lock)
{
	if (unlikely(!current_ops.trylock))
		BUG();
	return current_ops.trylock((raw_remote_spinlock_t *)(*lock));
}
EXPORT_SYMBOL(_remote_spin_trylock);

int _remote_spin_release(_remote_spinlock_t *lock, uint32_t pid)
{
	if (unlikely(!current_ops.release))
		BUG();
	return current_ops.release((raw_remote_spinlock_t *)(*lock), pid);
}
EXPORT_SYMBOL(_remote_spin_release);

int _remote_spin_owner(_remote_spinlock_t *lock)
{
	if (unlikely(!current_ops.owner))
		BUG();
	return current_ops.owner((raw_remote_spinlock_t *)(*lock));
}
EXPORT_SYMBOL(_remote_spin_owner);
/* end common spinlock API -------------------------------------------------- */

/* remote mutex implementation ---------------------------------------------- */
int _remote_mutex_init(struct remote_mutex_id *id, _remote_mutex_t *lock)
{
	BUG_ON(id == NULL);

	lock->delay_us = id->delay_us;
	return _remote_spin_lock_init(id->r_spinlock_id, &(lock->r_spinlock));
}
EXPORT_SYMBOL(_remote_mutex_init);

void _remote_mutex_lock(_remote_mutex_t *lock)
{
	while (!_remote_spin_trylock(&(lock->r_spinlock))) {
		if (lock->delay_us >= 1000)
			msleep(lock->delay_us/1000);
		else
			udelay(lock->delay_us);
	}
}
EXPORT_SYMBOL(_remote_mutex_lock);

void _remote_mutex_unlock(_remote_mutex_t *lock)
{
	_remote_spin_unlock(&(lock->r_spinlock));
}
EXPORT_SYMBOL(_remote_mutex_unlock);

int _remote_mutex_trylock(_remote_mutex_t *lock)
{
	return _remote_spin_trylock(&(lock->r_spinlock));
}
EXPORT_SYMBOL(_remote_mutex_trylock);
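
/*
 * Editorial usage sketch: a remote mutex is a remote spinlock plus a poll
 * delay, so callers can sleep between acquisition attempts instead of
 * spinning. The id string and delay below are made-up example values.
 */
#if 0	/* illustration only, not built */
static void remote_mutex_usage_sketch(void)
{
	static struct remote_mutex_id mid = {
		.r_spinlock_id = "S:5",	/* hypothetical lock id */
		.delay_us = 2000,	/* poll every 2 ms, via msleep() */
	};
	_remote_mutex_t m;

	if (_remote_mutex_init(&mid, &m) == 0) {
		_remote_mutex_lock(&m);
		/* ... access the shared resource ... */
		_remote_mutex_unlock(&m);
	}
}
#endif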
/* end remote mutex implementation ------------------------------------------ */