/* Copyright (c) 2008-2009, 2011-2013 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/system.h>

#include <mach/msm_iomap.h>
#include <mach/remote_spinlock.h>
#include <mach/dal.h>
#include <mach/msm_smem.h>
#include "smd_private.h"


#define SPINLOCK_PID_APPS 1

#define AUTO_MODE -1
#define DEKKERS_MODE 1
#define SWP_MODE 2
#define LDREX_MODE 3
#define SFPB_MODE 4

#if defined(CONFIG_MSM_REMOTE_SPINLOCK_DEKKERS) ||\
	defined(CONFIG_MSM_REMOTE_SPINLOCK_SWP) ||\
	defined(CONFIG_MSM_REMOTE_SPINLOCK_LDREX) ||\
	defined(CONFIG_MSM_REMOTE_SPINLOCK_SFPB)

#ifdef CONFIG_MSM_REMOTE_SPINLOCK_DEKKERS
/*
 * Use Dekker's algorithm when LDREX/STREX and SWP are unavailable for
 * shared memory
 */
#define CURRENT_MODE_INIT DEKKERS_MODE
#endif

#ifdef CONFIG_MSM_REMOTE_SPINLOCK_SWP
/* Use SWP-based locks when LDREX/STREX are unavailable for shared memory. */
#define CURRENT_MODE_INIT SWP_MODE
#endif

#ifdef CONFIG_MSM_REMOTE_SPINLOCK_LDREX
/* Use LDREX/STREX for shared memory locking, when available */
#define CURRENT_MODE_INIT LDREX_MODE
#endif

#ifdef CONFIG_MSM_REMOTE_SPINLOCK_SFPB
/* Use SFPB Hardware Mutex Registers */
#define CURRENT_MODE_INIT SFPB_MODE
#endif

#else
/* Use DT info to configure, with a fallback to LDREX if DT is missing */
#define CURRENT_MODE_INIT AUTO_MODE
#endif

static int current_mode = CURRENT_MODE_INIT;

static int is_hw_lock_type;
static DEFINE_MUTEX(ops_init_lock);

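/* Operations of the lock implementation selected at first use */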
struct spinlock_ops {
	void (*lock)(raw_remote_spinlock_t *lock);
	void (*unlock)(raw_remote_spinlock_t *lock);
	int (*trylock)(raw_remote_spinlock_t *lock);
	int (*release)(raw_remote_spinlock_t *lock, uint32_t pid);
	int (*owner)(raw_remote_spinlock_t *lock);
};

static struct spinlock_ops current_ops;

static int remote_spinlock_init_address(int id, _remote_spinlock_t *lock);

/* dekkers implementation --------------------------------------------------- */
#define DEK_LOCK_REQUEST 1
#define DEK_LOCK_YIELD (!DEK_LOCK_REQUEST)
#define DEK_YIELD_TURN_SELF 0
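/*
 * Classic Dekker's algorithm, using only plain loads and stores: each side
 * raises a request flag (its own self_lock, which the peer sees as
 * other_lock), and the shared turn variable (next_yield) picks which side
 * backs off when both request the lock at once.  The winner marks itself
 * as the next one to yield.
 */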
static void __raw_remote_dek_spin_lock(raw_remote_spinlock_t *lock)
{
	lock->dek.self_lock = DEK_LOCK_REQUEST;

	while (lock->dek.other_lock) {

		if (lock->dek.next_yield == DEK_YIELD_TURN_SELF)
			lock->dek.self_lock = DEK_LOCK_YIELD;

		while (lock->dek.other_lock)
			;

		lock->dek.self_lock = DEK_LOCK_REQUEST;
	}
	lock->dek.next_yield = DEK_YIELD_TURN_SELF;

	smp_mb();
}

static int __raw_remote_dek_spin_trylock(raw_remote_spinlock_t *lock)
{
	lock->dek.self_lock = DEK_LOCK_REQUEST;

	if (lock->dek.other_lock) {
		lock->dek.self_lock = DEK_LOCK_YIELD;
		return 0;
	}

	lock->dek.next_yield = DEK_YIELD_TURN_SELF;

	smp_mb();
	return 1;
}

static void __raw_remote_dek_spin_unlock(raw_remote_spinlock_t *lock)
{
	smp_mb();

	lock->dek.self_lock = DEK_LOCK_YIELD;
}

static int __raw_remote_dek_spin_release(raw_remote_spinlock_t *lock,
		uint32_t pid)
{
	return -EPERM;
}

static int __raw_remote_dek_spin_owner(raw_remote_spinlock_t *lock)
{
	return -EPERM;
}
/* end dekkers implementation ----------------------------------------------- */

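/*
 * The SWP instruction atomically swaps a register with a memory word, but
 * it has no Thumb-2 encoding (and is deprecated from ARMv6 onwards), so
 * the swp-based lock is compiled out of Thumb-2 kernels.
 */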
#ifndef CONFIG_THUMB2_KERNEL
/* swp implementation ------------------------------------------------------- */
static void __raw_remote_swp_spin_lock(raw_remote_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	swp	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	smp_mb();
}

static int __raw_remote_swp_spin_trylock(raw_remote_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"	swp	%0, %2, [%1]\n"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	}
	return 0;
}

static void __raw_remote_swp_spin_unlock(raw_remote_spinlock_t *lock)
{
	int lock_owner;

	smp_mb();
	lock_owner = readl_relaxed(&lock->lock);
	if (lock_owner != SPINLOCK_PID_APPS) {
		pr_err("%s: spinlock not owned by Apps (actual owner is %d)\n",
				__func__, lock_owner);
	}

	__asm__ __volatile__(
"	str	%1, [%0]"
	:
	: "r" (&lock->lock), "r" (0)
	: "cc");
}
/* end swp implementation --------------------------------------------------- */
#endif

/* ldrex implementation ----------------------------------------------------- */
static char *ldrex_compatible_string = "qcom,ipc-spinlock-ldrex";

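/*
 * LDREX/STREX form the standard ARM exclusive-access loop: spin until the
 * lock word reads zero, then attempt to store our PID; the store fails
 * (and the loop retries) if any other observer touched the word in
 * between.
 */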
static void __raw_remote_ex_spin_lock(raw_remote_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]\n"
"	teqeq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (SPINLOCK_PID_APPS)
	: "cc");

	smp_mb();
}

static int __raw_remote_ex_spin_trylock(raw_remote_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]\n"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (SPINLOCK_PID_APPS)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	}
	return 0;
}

static void __raw_remote_ex_spin_unlock(raw_remote_spinlock_t *lock)
{
	int lock_owner;

	smp_mb();
	lock_owner = readl_relaxed(&lock->lock);
	if (lock_owner != SPINLOCK_PID_APPS) {
		pr_err("%s: spinlock not owned by Apps (actual owner is %d)\n",
				__func__, lock_owner);
	}

	__asm__ __volatile__(
"	str	%1, [%0]\n"
	:
	: "r" (&lock->lock), "r" (0)
	: "cc");
}
/* end ldrex implementation ------------------------------------------------- */

/* sfpb implementation ------------------------------------------------------ */
#define SFPB_SPINLOCK_COUNT 8
#define MSM_SFPB_MUTEX_REG_BASE 0x01200600
#define MSM_SFPB_MUTEX_REG_SIZE (33 * 4)
#define SFPB_SPINLOCK_OFFSET 4
#define SFPB_SPINLOCK_SIZE 4

static uint32_t lock_count;
static phys_addr_t reg_base;
static uint32_t reg_size;
static uint32_t lock_offset; /* offset into the hardware block before lock 0 */
static uint32_t lock_size;

static void *hw_mutex_reg_base;
static DEFINE_MUTEX(hw_map_init_lock);

static char *sfpb_compatible_string = "qcom,ipc-spinlock-sfpb";
277static int init_hw_mutex(struct device_node *node)
278{
279 struct resource r;
280 int rc;
281
282 rc = of_address_to_resource(node, 0, &r);
283 if (rc)
284 BUG();
285
286 rc = of_property_read_u32(node, "qcom,num-locks", &lock_count);
287 if (rc)
288 BUG();
289
290 reg_base = r.start;
291 reg_size = (uint32_t)(resource_size(&r));
292 lock_offset = 0;
293 lock_size = reg_size / lock_count;
294
295 return 0;
296}
297
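/*
 * Prefer the devicetree-described SFPB block; fall back to the statically
 * defined register window used by pre-devicetree targets when no node is
 * present.
 */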
static void find_and_init_hw_mutex(void)
{
	struct device_node *node;

	node = of_find_compatible_node(NULL, NULL, sfpb_compatible_string);
	if (node) {
		init_hw_mutex(node);
	} else {
		lock_count = SFPB_SPINLOCK_COUNT;
		reg_base = MSM_SFPB_MUTEX_REG_BASE;
		reg_size = MSM_SFPB_MUTEX_REG_SIZE;
		lock_offset = SFPB_SPINLOCK_OFFSET;
		lock_size = SFPB_SPINLOCK_SIZE;
	}
	hw_mutex_reg_base = ioremap(reg_base, reg_size);
	BUG_ON(hw_mutex_reg_base == NULL);
}

static int remote_spinlock_init_address_hw(int id, _remote_spinlock_t *lock)
{
	/*
	 * Optimistic locking. Init only needs to be done once by the first
	 * caller. After that, serializing inits between different callers
	 * is unnecessary. The second check after the lock ensures init
	 * wasn't previously completed by someone else before the lock could
	 * be grabbed.
	 */
	if (!hw_mutex_reg_base) {
		mutex_lock(&hw_map_init_lock);
		if (!hw_mutex_reg_base)
			find_and_init_hw_mutex();
		mutex_unlock(&hw_map_init_lock);
	}

	if (id >= lock_count)
		return -EINVAL;

	*lock = hw_mutex_reg_base + lock_offset + id * lock_size;
	return 0;
}

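/*
 * The SFPB hardware mutex register only accepts a write while the lock is
 * free, so ownership is confirmed by reading back our PID: lock retries
 * the write-then-check sequence until it sticks, and trylock performs it
 * exactly once.
 */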
static void __raw_remote_sfpb_spin_lock(raw_remote_spinlock_t *lock)
{
	do {
		writel_relaxed(SPINLOCK_PID_APPS, lock);
		smp_mb();
	} while (readl_relaxed(lock) != SPINLOCK_PID_APPS);
}

static int __raw_remote_sfpb_spin_trylock(raw_remote_spinlock_t *lock)
{
	writel_relaxed(SPINLOCK_PID_APPS, lock);
	smp_mb();
	return readl_relaxed(lock) == SPINLOCK_PID_APPS;
}

static void __raw_remote_sfpb_spin_unlock(raw_remote_spinlock_t *lock)
{
	int lock_owner;

	lock_owner = readl_relaxed(lock);
	if (lock_owner != SPINLOCK_PID_APPS) {
		pr_err("%s: spinlock not owned by Apps (actual owner is %d)\n",
				__func__, lock_owner);
	}

	writel_relaxed(0, lock);
	smp_mb();
}
/* end sfpb implementation -------------------------------------------------- */

/* common spinlock API ------------------------------------------------------ */
/**
 * Release spinlock if it is owned by @pid.
 *
 * This is only to be used for situations where the processor owning
 * the spinlock has crashed and the spinlock must be released.
 *
 * @lock: lock structure
 * @pid: processor ID of processor to release
 * @returns: 0 if the lock was released, 1 if @pid did not hold it
 */
static int __raw_remote_gen_spin_release(raw_remote_spinlock_t *lock,
		uint32_t pid)
{
	int ret = 1;

	if (readl_relaxed(&lock->lock) == pid) {
		writel_relaxed(0, &lock->lock);
		wmb();
		ret = 0;
	}
	return ret;
}

/**
 * Return the owner of the spinlock.
 *
 * @lock: pointer to lock structure
 * @returns: >= 0 owner PID; < 0 for error case
 *
 * Used for testing. PIDs are assumed to be 31 bits or less.
 */
static int __raw_remote_gen_spin_owner(raw_remote_spinlock_t *lock)
{
	rmb();
	return readl_relaxed(&lock->lock);
}


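/*
 * A node is considered usable if it has no "status" property, or if the
 * status is "okay"/"ok" (the same test as of_device_is_available()).
 */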
static int dt_node_is_valid(const struct device_node *node)
{
	const char *status;
	int statlen;

	status = of_get_property(node, "status", &statlen);
	if (status == NULL)
		return 1;

	if (statlen > 0) {
		if (!strcmp(status, "okay") || !strcmp(status, "ok"))
			return 1;
	}

	return 0;
}

static void initialize_ops(void)
{
	struct device_node *node;

	switch (current_mode) {
	case DEKKERS_MODE:
		current_ops.lock = __raw_remote_dek_spin_lock;
		current_ops.unlock = __raw_remote_dek_spin_unlock;
		current_ops.trylock = __raw_remote_dek_spin_trylock;
		current_ops.release = __raw_remote_dek_spin_release;
		current_ops.owner = __raw_remote_dek_spin_owner;
		is_hw_lock_type = 0;
		break;
#ifndef CONFIG_THUMB2_KERNEL
	case SWP_MODE:
		current_ops.lock = __raw_remote_swp_spin_lock;
		current_ops.unlock = __raw_remote_swp_spin_unlock;
		current_ops.trylock = __raw_remote_swp_spin_trylock;
		current_ops.release = __raw_remote_gen_spin_release;
		current_ops.owner = __raw_remote_gen_spin_owner;
		is_hw_lock_type = 0;
		break;
#endif
	case LDREX_MODE:
		current_ops.lock = __raw_remote_ex_spin_lock;
		current_ops.unlock = __raw_remote_ex_spin_unlock;
		current_ops.trylock = __raw_remote_ex_spin_trylock;
		current_ops.release = __raw_remote_gen_spin_release;
		current_ops.owner = __raw_remote_gen_spin_owner;
		is_hw_lock_type = 0;
		break;
	case SFPB_MODE:
		current_ops.lock = __raw_remote_sfpb_spin_lock;
		current_ops.unlock = __raw_remote_sfpb_spin_unlock;
		current_ops.trylock = __raw_remote_sfpb_spin_trylock;
		current_ops.release = __raw_remote_gen_spin_release;
		current_ops.owner = __raw_remote_gen_spin_owner;
		is_hw_lock_type = 1;
		break;
	case AUTO_MODE:
		/*
		 * of_find_compatible_node() returns a valid pointer even if
		 * the status property is "disabled", so the validity needs
		 * to be checked
		 */
		node = of_find_compatible_node(NULL, NULL,
				sfpb_compatible_string);
		if (node && dt_node_is_valid(node)) {
			current_ops.lock = __raw_remote_sfpb_spin_lock;
			current_ops.unlock = __raw_remote_sfpb_spin_unlock;
			current_ops.trylock = __raw_remote_sfpb_spin_trylock;
			current_ops.release = __raw_remote_gen_spin_release;
			current_ops.owner = __raw_remote_gen_spin_owner;
			is_hw_lock_type = 1;
			break;
		}

		node = of_find_compatible_node(NULL, NULL,
				ldrex_compatible_string);
		if (node && dt_node_is_valid(node)) {
			current_ops.lock = __raw_remote_ex_spin_lock;
			current_ops.unlock = __raw_remote_ex_spin_unlock;
			current_ops.trylock = __raw_remote_ex_spin_trylock;
			current_ops.release = __raw_remote_gen_spin_release;
			current_ops.owner = __raw_remote_gen_spin_owner;
			is_hw_lock_type = 0;
			break;
		}

		current_ops.lock = __raw_remote_ex_spin_lock;
		current_ops.unlock = __raw_remote_ex_spin_unlock;
		current_ops.trylock = __raw_remote_ex_spin_trylock;
		current_ops.release = __raw_remote_gen_spin_release;
		current_ops.owner = __raw_remote_gen_spin_owner;
		is_hw_lock_type = 0;
		pr_warn("Falling back to LDREX remote spinlock implementation\n");
		break;
	default:
		BUG();
		break;
	}
}

/**
 * Release all spinlocks owned by @pid.
 *
 * This is only to be used for situations where the processor owning
 * spinlocks has crashed and the spinlocks must be released.
 *
 * @pid: processor ID of processor to release
 * @count: number of locks to iterate over
 */
static void remote_spin_release_all_locks(uint32_t pid, int count)
{
	int n;
	_remote_spinlock_t lock;

	for (n = 0; n < count; ++n) {
		if (remote_spinlock_init_address(n, &lock) == 0)
			_remote_spin_release(&lock, pid);
	}
}

void _remote_spin_release_all(uint32_t pid)
{
	remote_spin_release_all_locks(pid, lock_count);
}

static int
remote_spinlock_dal_init(const char *chunk_name, _remote_spinlock_t *lock)
{
	void *dal_smem_start, *dal_smem_end;
	uint32_t dal_smem_size;
	struct dal_chunk_header *cur_header;

	if (!chunk_name)
		return -EINVAL;

	dal_smem_start = smem_get_entry(SMEM_DAL_AREA, &dal_smem_size);
	if (!dal_smem_start)
		return -ENXIO;

	dal_smem_end = dal_smem_start + dal_smem_size;

	/* Find the first chunk header (at the first 4 KB boundary) */
	cur_header = (struct dal_chunk_header *)
		(((uint32_t)dal_smem_start + (4095)) & ~4095);
	*lock = NULL;
	while (cur_header->size != 0
			&& ((uint32_t)(cur_header + 1) < (uint32_t)dal_smem_end)) {

		/* Check if chunk name matches */
		if (!strncmp(cur_header->name, chunk_name,
				DAL_CHUNK_NAME_LENGTH)) {
			*lock = (_remote_spinlock_t)&cur_header->lock;
			return 0;
		}
		cur_header = (void *)cur_header + cur_header->size;
	}

	pr_err("%s: DAL remote lock \"%s\" not found.\n", __func__,
			chunk_name);
	return -EINVAL;
}

#define SMEM_SPINLOCK_COUNT 8
#define SMEM_SPINLOCK_ARRAY_SIZE (SMEM_SPINLOCK_COUNT * sizeof(uint32_t))

static int remote_spinlock_init_address_smem(int id, _remote_spinlock_t *lock)
{
	_remote_spinlock_t spinlock_start;

	if (id >= SMEM_SPINLOCK_COUNT)
		return -EINVAL;

	spinlock_start = smem_alloc(SMEM_SPINLOCK_ARRAY,
			SMEM_SPINLOCK_ARRAY_SIZE);
	if (spinlock_start == NULL)
		return -ENXIO;

	*lock = spinlock_start + id;

	lock_count = SMEM_SPINLOCK_COUNT;

	return 0;
}

static int remote_spinlock_init_address(int id, _remote_spinlock_t *lock)
{
	if (is_hw_lock_type)
		return remote_spinlock_init_address_hw(id, lock);
	else
		return remote_spinlock_init_address_smem(id, lock);
}

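/*
 * Lock IDs are strings: "D:<chunk name>" locates the lock embedded in the
 * named DAL chunk, and "S:<n>" (one decimal digit) selects static lock n.
 * A minimal, hypothetical client sequence (the lock ID is chosen purely
 * for illustration):
 *
 *	_remote_spinlock_t lock;
 *
 *	if (!_remote_spin_lock_init("S:3", &lock)) {
 *		_remote_spin_lock(&lock);
 *		... touch state shared with the remote processor ...
 *		_remote_spin_unlock(&lock);
 *	}
 */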
int _remote_spin_lock_init(remote_spinlock_id_t id, _remote_spinlock_t *lock)
{
	BUG_ON(id == NULL);

	/*
	 * Optimistic locking. Init only needs to be done once by the first
	 * caller. After that, serializing inits between different callers
	 * is unnecessary. The second check after the lock ensures init
	 * wasn't previously completed by someone else before the lock could
	 * be grabbed.
	 */
	if (!current_ops.lock) {
		mutex_lock(&ops_init_lock);
		if (!current_ops.lock)
			initialize_ops();
		mutex_unlock(&ops_init_lock);
	}

	if (id[0] == 'D' && id[1] == ':') {
		/* DAL chunk name starts after "D:" */
		return remote_spinlock_dal_init(&id[2], lock);
	} else if (id[0] == 'S' && id[1] == ':') {
		/* Single-digit lock ID follows "S:" */
		BUG_ON(id[3] != '\0');

		return remote_spinlock_init_address((((uint8_t)id[2]) - '0'),
				lock);
	} else {
		return -EINVAL;
	}
}

/*
 * lock comes in as a pointer to a pointer to the lock location, so it must
 * be dereferenced and cast to the right type for the actual lock
 * implementation functions
 */
void _remote_spin_lock(_remote_spinlock_t *lock)
{
	if (unlikely(!current_ops.lock))
		BUG();
	current_ops.lock((raw_remote_spinlock_t *)(*lock));
}
EXPORT_SYMBOL(_remote_spin_lock);

void _remote_spin_unlock(_remote_spinlock_t *lock)
{
	if (unlikely(!current_ops.unlock))
		BUG();
	current_ops.unlock((raw_remote_spinlock_t *)(*lock));
}
EXPORT_SYMBOL(_remote_spin_unlock);

int _remote_spin_trylock(_remote_spinlock_t *lock)
{
	if (unlikely(!current_ops.trylock))
		BUG();
	return current_ops.trylock((raw_remote_spinlock_t *)(*lock));
}
EXPORT_SYMBOL(_remote_spin_trylock);

int _remote_spin_release(_remote_spinlock_t *lock, uint32_t pid)
{
	if (unlikely(!current_ops.release))
		BUG();
	return current_ops.release((raw_remote_spinlock_t *)(*lock), pid);
}
EXPORT_SYMBOL(_remote_spin_release);

int _remote_spin_owner(_remote_spinlock_t *lock)
{
	if (unlikely(!current_ops.owner))
		BUG();
	return current_ops.owner((raw_remote_spinlock_t *)(*lock));
}
EXPORT_SYMBOL(_remote_spin_owner);
/* end common spinlock API -------------------------------------------------- */

/* remote mutex implementation ---------------------------------------------- */
int _remote_mutex_init(struct remote_mutex_id *id, _remote_mutex_t *lock)
{
	BUG_ON(id == NULL);

	lock->delay_us = id->delay_us;
	return _remote_spin_lock_init(id->r_spinlock_id, &(lock->r_spinlock));
}
EXPORT_SYMBOL(_remote_mutex_init);

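/*
 * Remote mutexes poll the underlying remote spinlock rather than spin on
 * it directly: between attempts the caller sleeps (msleep) when delay_us
 * is a millisecond or more and busy-waits (udelay) otherwise, so with
 * millisecond delays they must not be taken from atomic context.
 */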
void _remote_mutex_lock(_remote_mutex_t *lock)
{
	while (!_remote_spin_trylock(&(lock->r_spinlock))) {
		if (lock->delay_us >= 1000)
			msleep(lock->delay_us / 1000);
		else
			udelay(lock->delay_us);
	}
}
EXPORT_SYMBOL(_remote_mutex_lock);

void _remote_mutex_unlock(_remote_mutex_t *lock)
{
	_remote_spin_unlock(&(lock->r_spinlock));
}
EXPORT_SYMBOL(_remote_mutex_unlock);

int _remote_mutex_trylock(_remote_mutex_t *lock)
{
	return _remote_spin_trylock(&(lock->r_spinlock));
}
EXPORT_SYMBOL(_remote_mutex_trylock);
/* end remote mutex implementation ------------------------------------------ */