msm: remote_spinlock: Add support for hardware mutexes
Add support for using the SFPB hardware mutex registers as remote
spinlocks, selected with CONFIG_MSM_REMOTE_SPINLOCK_SFPB, as an
alternative to the shared-memory based implementations.  Also add
remote_spin_release() and remote_spin_release_all() so that spinlocks
still held by a remote processor can be force-released after that
processor has crashed.
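A minimal usage sketch (illustrative only, not part of this patch; the
"S:7" lock ID, the EXAMPLE_REMOTE_PID value and the calling context are
assumptions), built on the existing remote_spin_lock_init()/
remote_spin_lock() wrappers in include/linux/remote_spinlock.h plus the
remote_spin_release_all() interface added here:

    #include <linux/remote_spinlock.h>

    /* Placeholder: real spinlock PID values are target-specific. */
    #define EXAMPLE_REMOTE_PID 3

    static remote_spinlock_t example_lock;

    static int example_init(void)
    {
            /* "S:<digit>" selects lock <digit> in the SMEM spinlock
             * array or, with CONFIG_MSM_REMOTE_SPINLOCK_SFPB, the SFPB
             * hardware mutex register with the same index.
             */
            return remote_spin_lock_init(&example_lock, "S:7");
    }

    static void example_touch_shared_data(void)
    {
            remote_spin_lock(&example_lock);
            /* ... access state shared with the remote processor ... */
            remote_spin_unlock(&example_lock);
    }

    static void example_remote_processor_crashed(void)
    {
            /* Force-release every lock the dead processor still holds. */
            remote_spin_release_all(EXAMPLE_REMOTE_PID);
    }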
Signed-off-by: Eric Holmberg <eholmber@codeaurora.org>
diff --git a/arch/arm/mach-msm/include/mach/remote_spinlock.h b/arch/arm/mach-msm/include/mach/remote_spinlock.h
index fa889b6..75b70f3 100644
--- a/arch/arm/mach-msm/include/mach/remote_spinlock.h
+++ b/arch/arm/mach-msm/include/mach/remote_spinlock.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2009, 2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -19,6 +19,7 @@
#ifndef __ASM__ARCH_QC_REMOTE_SPINLOCK_H
#define __ASM__ARCH_QC_REMOTE_SPINLOCK_H
+#include <linux/io.h>
#include <linux/types.h>
/* Remote spinlock definitions. */
@@ -38,6 +39,7 @@
typedef raw_remote_spinlock_t *_remote_spinlock_t;
#define remote_spinlock_id_t const char *
+#define SMEM_SPINLOCK_PID_APPS 1
static inline void __raw_remote_ex_spin_lock(raw_remote_spinlock_t *lock)
{
@@ -173,14 +175,63 @@
lock->dek.self_lock = DEK_LOCK_YIELD;
}
-#ifdef CONFIG_MSM_SMD
+static inline int __raw_remote_dek_spin_release(raw_remote_spinlock_t *lock,
+ uint32_t pid)
+{
+ /* Releasing a remote owner's lock is not supported for Dekker's
+ * spinlocks.
+ */
+ return -EINVAL;
+}
+
+static inline void __raw_remote_sfpb_spin_lock(raw_remote_spinlock_t *lock)
+{
+ /* The SFPB mutex register latches the first processor ID written
+ * while it is free; spin, writing the apps PID and reading back,
+ * until the register shows that the apps processor owns the lock.
+ */
+ do {
+ writel_relaxed(SMEM_SPINLOCK_PID_APPS, lock);
+ smp_mb();
+ } while (readl_relaxed(lock) != SMEM_SPINLOCK_PID_APPS);
+}
+
+static inline int __raw_remote_sfpb_spin_trylock(raw_remote_spinlock_t *lock)
+{
+ /* Trylock is not implemented for SFPB mutexes; report success. */
+ return 1;
+}
+
+static inline void __raw_remote_sfpb_spin_unlock(raw_remote_spinlock_t *lock)
+{
+ writel_relaxed(0, lock);
+ smp_mb();
+}
+
+/**
+ * Release spinlock if it is owned by @pid.
+ *
+ * This is only to be used for situations where the processor owning
+ * the spinlock has crashed and the spinlock must be released.
+ *
+ * @lock - lock structure
+ * @pid - processor ID of processor to release
+ */
+static inline int __raw_remote_gen_spin_release(raw_remote_spinlock_t *lock,
+ uint32_t pid)
+{
+ int ret = 1;
+
+ if (readl_relaxed(&lock->lock) == pid) {
+ writel_relaxed(0, &lock->lock);
+ wmb();
+ ret = 0;
+ }
+ return ret;
+}
+
+#if defined(CONFIG_MSM_SMD) || defined(CONFIG_MSM_REMOTE_SPINLOCK_SFPB)
int _remote_spin_lock_init(remote_spinlock_id_t, _remote_spinlock_t *lock);
+void _remote_spin_release_all(uint32_t pid);
#else
static inline
int _remote_spin_lock_init(remote_spinlock_id_t id, _remote_spinlock_t *lock)
{
return -EINVAL;
}
+static inline void _remote_spin_release_all(uint32_t pid) {}
#endif
#if defined(CONFIG_MSM_REMOTE_SPINLOCK_DEKKERS)
@@ -189,16 +240,29 @@
#define _remote_spin_lock(lock) __raw_remote_dek_spin_lock(*lock)
#define _remote_spin_unlock(lock) __raw_remote_dek_spin_unlock(*lock)
#define _remote_spin_trylock(lock) __raw_remote_dek_spin_trylock(*lock)
+#define _remote_spin_release(lock, pid) __raw_remote_dek_spin_release(*lock,\
+ pid)
#elif defined(CONFIG_MSM_REMOTE_SPINLOCK_SWP)
/* Use SWP-based locks when LDREX/STREX are unavailable for shared memory. */
#define _remote_spin_lock(lock) __raw_remote_swp_spin_lock(*lock)
#define _remote_spin_unlock(lock) __raw_remote_swp_spin_unlock(*lock)
#define _remote_spin_trylock(lock) __raw_remote_swp_spin_trylock(*lock)
+#define _remote_spin_release(lock, pid) __raw_remote_gen_spin_release(*lock,\
+ pid)
+#elif defined(CONFIG_MSM_REMOTE_SPINLOCK_SFPB)
+/* Use SFPB Hardware Mutex Registers */
+#define _remote_spin_lock(lock) __raw_remote_sfpb_spin_lock(*lock)
+#define _remote_spin_unlock(lock) __raw_remote_sfpb_spin_unlock(*lock)
+#define _remote_spin_trylock(lock) __raw_remote_sfpb_spin_trylock(*lock)
+#define _remote_spin_release(lock, pid) __raw_remote_gen_spin_release(*lock,\
+ pid)
#else
/* Use LDREX/STREX for shared memory locking, when available */
#define _remote_spin_lock(lock) __raw_remote_ex_spin_lock(*lock)
#define _remote_spin_unlock(lock) __raw_remote_ex_spin_unlock(*lock)
#define _remote_spin_trylock(lock) __raw_remote_ex_spin_trylock(*lock)
+#define _remote_spin_release(lock, pid) __raw_remote_gen_spin_release(*lock, \
+ pid)
#endif
/* Remote mutex definitions. */
diff --git a/arch/arm/mach-msm/remote_spinlock.c b/arch/arm/mach-msm/remote_spinlock.c
index 2bb4475..2480433 100644
--- a/arch/arm/mach-msm/remote_spinlock.c
+++ b/arch/arm/mach-msm/remote_spinlock.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2008-2009, 2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -18,15 +18,50 @@
#include <asm/system.h>
+#include <mach/msm_iomap.h>
#include <mach/remote_spinlock.h>
#include <mach/dal.h>
#include "smd_private.h"
#include <linux/module.h>
+static void remote_spin_release_all_locks(uint32_t pid, int count);
+
+#if defined(CONFIG_MSM_REMOTE_SPINLOCK_SFPB)
+#define SFPB_SPINLOCK_COUNT 8
+#define MSM_SFPB_MUTEX_REG_BASE 0x01200600
+#define MSM_SFPB_MUTEX_REG_SIZE (33 * 4)
+
+static void *hw_mutex_reg_base;
+static DEFINE_MUTEX(hw_map_init_lock);
+
+static int remote_spinlock_init_address(int id, _remote_spinlock_t *lock)
+{
+ if (id >= SFPB_SPINLOCK_COUNT)
+ return -EINVAL;
+
+ if (!hw_mutex_reg_base) {
+ mutex_lock(&hw_map_init_lock);
+ if (!hw_mutex_reg_base)
+ hw_mutex_reg_base = ioremap(MSM_SFPB_MUTEX_REG_BASE,
+ MSM_SFPB_MUTEX_REG_SIZE);
+ mutex_unlock(&hw_map_init_lock);
+ BUG_ON(hw_mutex_reg_base == NULL);
+ }
+
+ *lock = hw_mutex_reg_base + 0x4 + id * 4;
+ return 0;
+}
+
+void _remote_spin_release_all(uint32_t pid)
+{
+ remote_spin_release_all_locks(pid, SFPB_SPINLOCK_COUNT);
+}
+
+#else
#define SMEM_SPINLOCK_COUNT 8
#define SMEM_SPINLOCK_ARRAY_SIZE (SMEM_SPINLOCK_COUNT * sizeof(uint32_t))
-static int remote_spinlock_smem_init(int id, _remote_spinlock_t *lock)
+static int remote_spinlock_init_address(int id, _remote_spinlock_t *lock)
{
_remote_spinlock_t spinlock_start;
@@ -43,6 +78,32 @@
return 0;
}
+void _remote_spin_release_all(uint32_t pid)
+{
+ remote_spin_release_all_locks(pid, SMEM_SPINLOCK_COUNT);
+}
+
+#endif
+
+/**
+ * Release all spinlocks owned by @pid.
+ *
+ * This is only to be used for situations where the processor owning
+ * spinlocks has crashed and the spinlocks must be released.
+ *
+ * @pid - processor ID of processor to release
+ */
+static void remote_spin_release_all_locks(uint32_t pid, int count)
+{
+ int n;
+ _remote_spinlock_t lock;
+
+ for (n = 0; n < count; ++n) {
+ if (remote_spinlock_init_address(n, &lock) == 0)
+ _remote_spin_release(&lock, pid);
+ }
+}
+
static int
remote_spinlock_dal_init(const char *chunk_name, _remote_spinlock_t *lock)
{
@@ -88,11 +149,14 @@
/* DAL chunk name starts after "D:" */
return remote_spinlock_dal_init(&id[2], lock);
} else if (id[0] == 'S' && id[1] == ':') {
- /* Single-digit SMEM lock ID follows "S:" */
+ /* Single-digit lock ID follows "S:" */
BUG_ON(id[3] != '\0');
- return remote_spinlock_smem_init((((uint8_t)id[2])-'0'), lock);
- } else
+
+ return remote_spinlock_init_address((((uint8_t)id[2])-'0'),
+ lock);
+ } else {
return -EINVAL;
+ }
}
int _remote_mutex_init(struct remote_mutex_id *id, _remote_mutex_t *lock)
diff --git a/include/linux/remote_spinlock.h b/include/linux/remote_spinlock.h
index 535592e..8d7c7e7 100644
--- a/include/linux/remote_spinlock.h
+++ b/include/linux/remote_spinlock.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2008-2009, 2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -83,6 +83,11 @@
: 0; \
})
+#define remote_spin_release(lock, pid) \
+ _remote_spin_release(&((lock)->remote), pid)
+
+#define remote_spin_release_all(pid) \
+ _remote_spin_release_all(pid)
typedef struct {
struct mutex local;
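A hypothetical per-lock recovery sketch (not part of this patch; the
function name, PID argument and pr_info() message are illustrative),
showing the remote_spin_release() wrapper added above.  With the generic
implementation, the call returns 0 only when the lock was owned by the
given PID and has now been cleared:

    #include <linux/kernel.h>
    #include <linux/remote_spinlock.h>

    static void example_reclaim_lock(remote_spinlock_t *lock,
                                     uint32_t crashed_pid)
    {
            if (remote_spin_release(lock, crashed_pid) == 0)
                    pr_info("remote spinlock reclaimed from PID %u\n",
                            crashed_pid);
    }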