/* Copyright (c) 2008-2009, 2011-2013 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/system.h>

#include <mach/msm_iomap.h>
#include <mach/remote_spinlock.h>
#include <mach/dal.h>
#include "smd_private.h"

#define SPINLOCK_PID_APPS 1

#define AUTO_MODE	-1
#define DEKKERS_MODE	1
#define SWP_MODE	2
#define LDREX_MODE	3
#define SFPB_MODE	4

#if defined(CONFIG_MSM_REMOTE_SPINLOCK_DEKKERS) ||\
	defined(CONFIG_MSM_REMOTE_SPINLOCK_SWP) ||\
	defined(CONFIG_MSM_REMOTE_SPINLOCK_LDREX) ||\
	defined(CONFIG_MSM_REMOTE_SPINLOCK_SFPB)

#ifdef CONFIG_MSM_REMOTE_SPINLOCK_DEKKERS
/*
 * Use Dekker's algorithm when LDREX/STREX and SWP are unavailable for
 * shared memory.
 */
#define CURRENT_MODE_INIT DEKKERS_MODE
#endif

#ifdef CONFIG_MSM_REMOTE_SPINLOCK_SWP
/* Use SWP-based locks when LDREX/STREX are unavailable for shared memory. */
#define CURRENT_MODE_INIT SWP_MODE
#endif

#ifdef CONFIG_MSM_REMOTE_SPINLOCK_LDREX
/* Use LDREX/STREX for shared memory locking, when available. */
#define CURRENT_MODE_INIT LDREX_MODE
#endif

#ifdef CONFIG_MSM_REMOTE_SPINLOCK_SFPB
/* Use the SFPB hardware mutex registers. */
#define CURRENT_MODE_INIT SFPB_MODE
#endif

#else
/* Use DT info to configure, with a fallback to LDREX if DT is missing. */
#define CURRENT_MODE_INIT AUTO_MODE
#endif

static int current_mode = CURRENT_MODE_INIT;

static int is_hw_lock_type;
static DEFINE_MUTEX(ops_init_lock);

struct spinlock_ops {
	void (*lock)(raw_remote_spinlock_t *lock);
	void (*unlock)(raw_remote_spinlock_t *lock);
	int (*trylock)(raw_remote_spinlock_t *lock);
	int (*release)(raw_remote_spinlock_t *lock, uint32_t pid);
	int (*owner)(raw_remote_spinlock_t *lock);
};
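
/*
 * current_ops is a dispatch table for the lock flavor selected at runtime;
 * initialize_ops() below fills it in exactly once, on first use of the API.
 */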
static struct spinlock_ops current_ops;

static int remote_spinlock_init_address(int id, _remote_spinlock_t *lock);

/* dekkers implementation --------------------------------------------------- */
#define DEK_LOCK_REQUEST	1
#define DEK_LOCK_YIELD		(!DEK_LOCK_REQUEST)
#define DEK_YIELD_TURN_SELF	0
static void __raw_remote_dek_spin_lock(raw_remote_spinlock_t *lock)
{
	lock->dek.self_lock = DEK_LOCK_REQUEST;

	while (lock->dek.other_lock) {

		if (lock->dek.next_yield == DEK_YIELD_TURN_SELF)
			lock->dek.self_lock = DEK_LOCK_YIELD;

		while (lock->dek.other_lock)
			;

		lock->dek.self_lock = DEK_LOCK_REQUEST;
	}
	lock->dek.next_yield = DEK_YIELD_TURN_SELF;

	smp_mb();
}

static int __raw_remote_dek_spin_trylock(raw_remote_spinlock_t *lock)
{
	lock->dek.self_lock = DEK_LOCK_REQUEST;

	if (lock->dek.other_lock) {
		lock->dek.self_lock = DEK_LOCK_YIELD;
		return 0;
	}

	lock->dek.next_yield = DEK_YIELD_TURN_SELF;

	smp_mb();
	return 1;
}

static void __raw_remote_dek_spin_unlock(raw_remote_spinlock_t *lock)
{
	smp_mb();

	lock->dek.self_lock = DEK_LOCK_YIELD;
}

static int __raw_remote_dek_spin_release(raw_remote_spinlock_t *lock,
		uint32_t pid)
{
	return -EPERM;
}

static int __raw_remote_dek_spin_owner(raw_remote_spinlock_t *lock)
{
	return -EPERM;
}
/* end dekkers implementation ----------------------------------------------- */
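
/*
 * Illustrative sketch only, not driver code: Dekker's algorithm assumes the
 * remote processor runs the mirror image of the handshake above, using
 * other_lock as its request flag, polling self_lock, and treating any
 * next_yield value other than DEK_YIELD_TURN_SELF as its own turn to yield
 * (the exact remote sentinel value is an assumption here):
 *
 *	lock->dek.other_lock = DEK_LOCK_REQUEST;
 *	while (lock->dek.self_lock) {
 *		if (lock->dek.next_yield != DEK_YIELD_TURN_SELF)
 *			lock->dek.other_lock = DEK_LOCK_YIELD;
 *		while (lock->dek.self_lock)
 *			;
 *		lock->dek.other_lock = DEK_LOCK_REQUEST;
 *	}
 *	lock->dek.next_yield = !DEK_YIELD_TURN_SELF;
 */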

/* swp implementation ------------------------------------------------------- */
static void __raw_remote_swp_spin_lock(raw_remote_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	swp	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (SPINLOCK_PID_APPS)
	: "cc");

	smp_mb();
}

static int __raw_remote_swp_spin_trylock(raw_remote_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"	swp	%0, %2, [%1]\n"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (SPINLOCK_PID_APPS)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	}
	return 0;
}

static void __raw_remote_swp_spin_unlock(raw_remote_spinlock_t *lock)
{
	int lock_owner;

	smp_mb();
	lock_owner = readl_relaxed(&lock->lock);
	if (lock_owner != SPINLOCK_PID_APPS) {
		pr_err("%s: spinlock not owned by Apps (actual owner is %d)\n",
				__func__, lock_owner);
	}

	__asm__ __volatile__(
"	str	%1, [%0]"
	:
	: "r" (&lock->lock), "r" (0)
	: "cc");
}
/* end swp implementation --------------------------------------------------- */
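
/*
 * Note on the SWP variant above: SWP atomically exchanges the lock word, so
 * the acquire loop keeps swapping SPINLOCK_PID_APPS in until the value
 * swapped out is 0, i.e. the lock was observed free.  SWP is deprecated on
 * newer ARM cores, which is why it is only chosen when LDREX/STREX cannot
 * be used on shared memory.
 */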

/* ldrex implementation ----------------------------------------------------- */
static char *ldrex_compatible_string = "qcom,ipc-spinlock-ldrex";

static void __raw_remote_ex_spin_lock(raw_remote_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]\n"
"	teqeq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (SPINLOCK_PID_APPS)
	: "cc");

	smp_mb();
}

static int __raw_remote_ex_spin_trylock(raw_remote_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]\n"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (SPINLOCK_PID_APPS)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	}
	return 0;
}

static void __raw_remote_ex_spin_unlock(raw_remote_spinlock_t *lock)
{
	int lock_owner;

	smp_mb();
	lock_owner = readl_relaxed(&lock->lock);
	if (lock_owner != SPINLOCK_PID_APPS) {
		pr_err("%s: spinlock not owned by Apps (actual owner is %d)\n",
				__func__, lock_owner);
	}

	__asm__ __volatile__(
"	str	%1, [%0]\n"
	:
	: "r" (&lock->lock), "r" (0)
	: "cc");
}
/* end ldrex implementation ------------------------------------------------- */

/* sfpb implementation ------------------------------------------------------ */
#define SFPB_SPINLOCK_COUNT		8
#define MSM_SFPB_MUTEX_REG_BASE		0x01200600
#define MSM_SFPB_MUTEX_REG_SIZE		(33 * 4)
#define SFPB_SPINLOCK_OFFSET		4
#define SFPB_SPINLOCK_SIZE		4

static uint32_t lock_count;
static phys_addr_t reg_base;
static uint32_t reg_size;
static uint32_t lock_offset; /* offset into the hardware block before lock 0 */
static uint32_t lock_size;

static void *hw_mutex_reg_base;
static DEFINE_MUTEX(hw_map_init_lock);

static char *sfpb_compatible_string = "qcom,ipc-spinlock-sfpb";

static int init_hw_mutex(struct device_node *node)
{
	struct resource r;
	int rc;

	rc = of_address_to_resource(node, 0, &r);
	if (rc)
		BUG();

	rc = of_property_read_u32(node, "qcom,num-locks", &lock_count);
	if (rc)
		BUG();

	reg_base = r.start;
	reg_size = (uint32_t)(resource_size(&r));
	lock_offset = 0;
	lock_size = reg_size / lock_count;

	return 0;
}

static void find_and_init_hw_mutex(void)
{
	struct device_node *node;

	node = of_find_compatible_node(NULL, NULL, sfpb_compatible_string);
	if (node) {
		init_hw_mutex(node);
	} else {
		lock_count = SFPB_SPINLOCK_COUNT;
		reg_base = MSM_SFPB_MUTEX_REG_BASE;
		reg_size = MSM_SFPB_MUTEX_REG_SIZE;
		lock_offset = SFPB_SPINLOCK_OFFSET;
		lock_size = SFPB_SPINLOCK_SIZE;
	}
	hw_mutex_reg_base = ioremap(reg_base, reg_size);
	BUG_ON(hw_mutex_reg_base == NULL);
}
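
/*
 * Illustrative device tree node matching init_hw_mutex() above.  The
 * compatible string and the qcom,num-locks property come from this file;
 * the unit address and reg values are made-up placeholders:
 *
 *	ipc-spinlock@fd484000 {
 *		compatible = "qcom,ipc-spinlock-sfpb";
 *		reg = <0xfd484000 0x1000>;
 *		qcom,num-locks = <8>;
 *	};
 */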

static int remote_spinlock_init_address_hw(int id, _remote_spinlock_t *lock)
{
	/*
	 * Optimistic locking.  Init only needs to be done once by the first
	 * caller.  After that, serializing inits between different callers
	 * is unnecessary.  The second check after the lock ensures init
	 * wasn't previously completed by someone else before the lock could
	 * be grabbed.
	 */
	if (!hw_mutex_reg_base) {
		mutex_lock(&hw_map_init_lock);
		if (!hw_mutex_reg_base)
			find_and_init_hw_mutex();
		mutex_unlock(&hw_map_init_lock);
	}

	if (id >= lock_count)
		return -EINVAL;

	*lock = hw_mutex_reg_base + lock_offset + id * lock_size;
	return 0;
}

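/*
 * The SFPB mutex block implements acquisition in hardware: a write of a
 * nonzero PID is assumed to be latched only while the lock word is free,
 * so reading back SPINLOCK_PID_APPS confirms ownership and any other value
 * means a remote processor won the race.
 */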
static void __raw_remote_sfpb_spin_lock(raw_remote_spinlock_t *lock)
{
	do {
		writel_relaxed(SPINLOCK_PID_APPS, lock);
		smp_mb();
	} while (readl_relaxed(lock) != SPINLOCK_PID_APPS);
}

static int __raw_remote_sfpb_spin_trylock(raw_remote_spinlock_t *lock)
{
	writel_relaxed(SPINLOCK_PID_APPS, lock);
	smp_mb();
	return readl_relaxed(lock) == SPINLOCK_PID_APPS;
}

static void __raw_remote_sfpb_spin_unlock(raw_remote_spinlock_t *lock)
{
	int lock_owner;

	lock_owner = readl_relaxed(lock);
	if (lock_owner != SPINLOCK_PID_APPS) {
		pr_err("%s: spinlock not owned by Apps (actual owner is %d)\n",
				__func__, lock_owner);
	}

	writel_relaxed(0, lock);
	smp_mb();
}
/* end sfpb implementation -------------------------------------------------- */

/* common spinlock API ------------------------------------------------------ */
/**
 * Release spinlock if it is owned by @pid.
 *
 * This is only to be used for situations where the processor owning
 * the spinlock has crashed and the spinlock must be released.
 *
 * @lock: lock structure
 * @pid: processor ID of processor to release
 */
static int __raw_remote_gen_spin_release(raw_remote_spinlock_t *lock,
		uint32_t pid)
{
	int ret = 1;

	if (readl_relaxed(&lock->lock) == pid) {
		writel_relaxed(0, &lock->lock);
		wmb();
		ret = 0;
	}
	return ret;
}

/**
 * Return the owner of the spinlock.
 *
 * @lock: pointer to lock structure
 * @returns: >= 0 owner PID; < 0 for error case
 *
 * Used for testing.  PIDs are assumed to be 31 bits or less.
 */
static int __raw_remote_gen_spin_owner(raw_remote_spinlock_t *lock)
{
	rmb();
	return readl_relaxed(&lock->lock);
}

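/*
 * A status property that is present but not "ok"/"okay" marks a node as
 * disabled; this mirrors the check done by of_device_is_available().
 */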
static int dt_node_is_valid(const struct device_node *node)
{
	const char *status;
	int statlen;

	status = of_get_property(node, "status", &statlen);
	if (status == NULL)
		return 1;

	if (statlen > 0) {
		if (!strcmp(status, "okay") || !strcmp(status, "ok"))
			return 1;
	}

	return 0;
}

static void initialize_ops(void)
{
	struct device_node *node;

	switch (current_mode) {
	case DEKKERS_MODE:
		current_ops.lock = __raw_remote_dek_spin_lock;
		current_ops.unlock = __raw_remote_dek_spin_unlock;
		current_ops.trylock = __raw_remote_dek_spin_trylock;
		current_ops.release = __raw_remote_dek_spin_release;
		current_ops.owner = __raw_remote_dek_spin_owner;
		is_hw_lock_type = 0;
		break;
	case SWP_MODE:
		current_ops.lock = __raw_remote_swp_spin_lock;
		current_ops.unlock = __raw_remote_swp_spin_unlock;
		current_ops.trylock = __raw_remote_swp_spin_trylock;
		current_ops.release = __raw_remote_gen_spin_release;
		current_ops.owner = __raw_remote_gen_spin_owner;
		is_hw_lock_type = 0;
		break;
	case LDREX_MODE:
		current_ops.lock = __raw_remote_ex_spin_lock;
		current_ops.unlock = __raw_remote_ex_spin_unlock;
		current_ops.trylock = __raw_remote_ex_spin_trylock;
		current_ops.release = __raw_remote_gen_spin_release;
		current_ops.owner = __raw_remote_gen_spin_owner;
		is_hw_lock_type = 0;
		break;
	case SFPB_MODE:
		current_ops.lock = __raw_remote_sfpb_spin_lock;
		current_ops.unlock = __raw_remote_sfpb_spin_unlock;
		current_ops.trylock = __raw_remote_sfpb_spin_trylock;
		current_ops.release = __raw_remote_gen_spin_release;
		current_ops.owner = __raw_remote_gen_spin_owner;
		is_hw_lock_type = 1;
		break;
	case AUTO_MODE:
		/*
		 * of_find_compatible_node() returns a valid pointer even if
		 * the status property is "disabled", so the validity needs
		 * to be checked.
		 */
		node = of_find_compatible_node(NULL, NULL,
				sfpb_compatible_string);
		if (node && dt_node_is_valid(node)) {
			current_ops.lock = __raw_remote_sfpb_spin_lock;
			current_ops.unlock = __raw_remote_sfpb_spin_unlock;
			current_ops.trylock = __raw_remote_sfpb_spin_trylock;
			current_ops.release = __raw_remote_gen_spin_release;
			current_ops.owner = __raw_remote_gen_spin_owner;
			is_hw_lock_type = 1;
			break;
		}

		node = of_find_compatible_node(NULL, NULL,
				ldrex_compatible_string);
		if (node && dt_node_is_valid(node)) {
			current_ops.lock = __raw_remote_ex_spin_lock;
			current_ops.unlock = __raw_remote_ex_spin_unlock;
			current_ops.trylock = __raw_remote_ex_spin_trylock;
			current_ops.release = __raw_remote_gen_spin_release;
			current_ops.owner = __raw_remote_gen_spin_owner;
			is_hw_lock_type = 0;
			break;
		}

		current_ops.lock = __raw_remote_ex_spin_lock;
		current_ops.unlock = __raw_remote_ex_spin_unlock;
		current_ops.trylock = __raw_remote_ex_spin_trylock;
		current_ops.release = __raw_remote_gen_spin_release;
		current_ops.owner = __raw_remote_gen_spin_owner;
		is_hw_lock_type = 0;
		pr_warn("Falling back to LDREX remote spinlock implementation\n");
		break;
	default:
		BUG();
		break;
	}
}

/**
 * Release all spinlocks owned by @pid.
 *
 * This is only to be used for situations where the processor owning
 * spinlocks has crashed and the spinlocks must be released.
 *
 * @pid: processor ID of processor to release
 */
static void remote_spin_release_all_locks(uint32_t pid, int count)
{
	int n;
	_remote_spinlock_t lock;

	for (n = 0; n < count; ++n) {
		if (remote_spinlock_init_address(n, &lock) == 0)
			_remote_spin_release(&lock, pid);
	}
}

void _remote_spin_release_all(uint32_t pid)
{
	remote_spin_release_all_locks(pid, lock_count);
}

static int
remote_spinlock_dal_init(const char *chunk_name, _remote_spinlock_t *lock)
{
	void *dal_smem_start, *dal_smem_end;
	uint32_t dal_smem_size;
	struct dal_chunk_header *cur_header;

	if (!chunk_name)
		return -EINVAL;

	dal_smem_start = smem_get_entry(SMEM_DAL_AREA, &dal_smem_size);
	if (!dal_smem_start)
		return -ENXIO;

	dal_smem_end = dal_smem_start + dal_smem_size;

	/* Find first chunk header */
	cur_header = (struct dal_chunk_header *)
			(((uint32_t)dal_smem_start + (4095)) & ~4095);
	*lock = NULL;
	while (cur_header->size != 0
		&& ((uint32_t)(cur_header + 1) < (uint32_t)dal_smem_end)) {

		/* Check if chunk name matches */
		if (!strncmp(cur_header->name, chunk_name,
				DAL_CHUNK_NAME_LENGTH)) {
			*lock = (_remote_spinlock_t)&cur_header->lock;
			return 0;
		}
		cur_header = (void *)cur_header + cur_header->size;
	}

	pr_err("%s: DAL remote lock \"%s\" not found.\n", __func__,
			chunk_name);
	return -EINVAL;
}

#define SMEM_SPINLOCK_COUNT		8
#define SMEM_SPINLOCK_ARRAY_SIZE	(SMEM_SPINLOCK_COUNT * sizeof(uint32_t))

static int remote_spinlock_init_address_smem(int id, _remote_spinlock_t *lock)
{
	_remote_spinlock_t spinlock_start;

	if (id >= SMEM_SPINLOCK_COUNT)
		return -EINVAL;

	spinlock_start = smem_alloc(SMEM_SPINLOCK_ARRAY,
				    SMEM_SPINLOCK_ARRAY_SIZE);
	if (spinlock_start == NULL)
		return -ENXIO;

	*lock = spinlock_start + id;

	lock_count = SMEM_SPINLOCK_COUNT;

	return 0;
}

static int remote_spinlock_init_address(int id, _remote_spinlock_t *lock)
{
	if (is_hw_lock_type)
		return remote_spinlock_init_address_hw(id, lock);
	else
		return remote_spinlock_init_address_smem(id, lock);
}

int _remote_spin_lock_init(remote_spinlock_id_t id, _remote_spinlock_t *lock)
{
	BUG_ON(id == NULL);

	/*
	 * Optimistic locking.  Init only needs to be done once by the first
	 * caller.  After that, serializing inits between different callers
	 * is unnecessary.  The second check after the lock ensures init
	 * wasn't previously completed by someone else before the lock could
	 * be grabbed.
	 */
	if (!current_ops.lock) {
		mutex_lock(&ops_init_lock);
		if (!current_ops.lock)
			initialize_ops();
		mutex_unlock(&ops_init_lock);
	}

	if (id[0] == 'D' && id[1] == ':') {
		/* DAL chunk name starts after "D:" */
		return remote_spinlock_dal_init(&id[2], lock);
	} else if (id[0] == 'S' && id[1] == ':') {
		/* Single-digit lock ID follows "S:" */
		BUG_ON(id[3] != '\0');

		return remote_spinlock_init_address((((uint8_t)id[2]) - '0'),
				lock);
	} else {
		return -EINVAL;
	}
}
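
/*
 * Example (hypothetical caller) of the "S:<digit>" namespace parsed above,
 * with error handling trimmed:
 *
 *	_remote_spinlock_t lock;
 *
 *	if (_remote_spin_lock_init("S:3", &lock) == 0) {
 *		_remote_spin_lock(&lock);
 *		... touch state shared with the remote processor ...
 *		_remote_spin_unlock(&lock);
 *	}
 *
 * A DAL lock is initialized the same way, with the chunk name following the
 * "D:" prefix; the chunk name itself is target-specific.
 */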

/*
 * The lock comes in as a pointer to a pointer to the lock location, so it
 * must be dereferenced and cast to the right type for the actual lock
 * implementation functions.
 */
void _remote_spin_lock(_remote_spinlock_t *lock)
{
	if (unlikely(!current_ops.lock))
		BUG();
	current_ops.lock((raw_remote_spinlock_t *)(*lock));
}
EXPORT_SYMBOL(_remote_spin_lock);

void _remote_spin_unlock(_remote_spinlock_t *lock)
{
	if (unlikely(!current_ops.unlock))
		BUG();
	current_ops.unlock((raw_remote_spinlock_t *)(*lock));
}
EXPORT_SYMBOL(_remote_spin_unlock);

int _remote_spin_trylock(_remote_spinlock_t *lock)
{
	if (unlikely(!current_ops.trylock))
		BUG();
	return current_ops.trylock((raw_remote_spinlock_t *)(*lock));
}
EXPORT_SYMBOL(_remote_spin_trylock);

int _remote_spin_release(_remote_spinlock_t *lock, uint32_t pid)
{
	if (unlikely(!current_ops.release))
		BUG();
	return current_ops.release((raw_remote_spinlock_t *)(*lock), pid);
}
EXPORT_SYMBOL(_remote_spin_release);

int _remote_spin_owner(_remote_spinlock_t *lock)
{
	if (unlikely(!current_ops.owner))
		BUG();
	return current_ops.owner((raw_remote_spinlock_t *)(*lock));
}
EXPORT_SYMBOL(_remote_spin_owner);
/* end common spinlock API -------------------------------------------------- */

/* remote mutex implementation ---------------------------------------------- */
int _remote_mutex_init(struct remote_mutex_id *id, _remote_mutex_t *lock)
{
	BUG_ON(id == NULL);

	lock->delay_us = id->delay_us;
	return _remote_spin_lock_init(id->r_spinlock_id, &(lock->r_spinlock));
}
EXPORT_SYMBOL(_remote_mutex_init);

void _remote_mutex_lock(_remote_mutex_t *lock)
{
	while (!_remote_spin_trylock(&(lock->r_spinlock))) {
		if (lock->delay_us >= 1000)
			msleep(lock->delay_us / 1000);
		else
			udelay(lock->delay_us);
	}
}
EXPORT_SYMBOL(_remote_mutex_lock);

void _remote_mutex_unlock(_remote_mutex_t *lock)
{
	_remote_spin_unlock(&(lock->r_spinlock));
}
EXPORT_SYMBOL(_remote_mutex_unlock);

int _remote_mutex_trylock(_remote_mutex_t *lock)
{
	return _remote_spin_trylock(&(lock->r_spinlock));
}
EXPORT_SYMBOL(_remote_mutex_trylock);
/* end remote mutex implementation ------------------------------------------ */
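
/*
 * Example (hypothetical caller): a remote mutex is just a remote spinlock
 * polled with a delay between attempts, so with delay_us >= 1000 the lock
 * path sleeps and must not be called from atomic context:
 *
 *	static struct remote_mutex_id mid = {
 *		.r_spinlock_id = "S:5",
 *		.delay_us = 2000,
 *	};
 *	static _remote_mutex_t m;
 *
 *	if (_remote_mutex_init(&mid, &m) == 0) {
 *		_remote_mutex_lock(&m);		(sleeps ~2 ms per retry)
 *		...
 *		_remote_mutex_unlock(&m);
 *	}
 */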