/* Copyright (c) 2008-2009, 2011-2012 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/system.h>

#include <mach/msm_iomap.h>
#include <mach/remote_spinlock.h>
#include <mach/dal.h>
#include "smd_private.h"

static void remote_spin_release_all_locks(uint32_t pid, int count);

#if defined(CONFIG_MSM_REMOTE_SPINLOCK_SFPB)
#define SFPB_SPINLOCK_COUNT 8
#define MSM_SFPB_MUTEX_REG_BASE 0x01200600
#define MSM_SFPB_MUTEX_REG_SIZE (33 * 4)
#define SFPB_SPINLOCK_OFFSET 4
#define SFPB_SPINLOCK_SIZE 4

static uint32_t lock_count;
static phys_addr_t reg_base;
static uint32_t reg_size;
static uint32_t lock_offset; /* offset into the hardware block before lock 0 */
static uint32_t lock_size;

static void *hw_mutex_reg_base;
static DEFINE_MUTEX(hw_map_init_lock);

static char *compatible_string = "qcom,ipc-spinlock";

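/*
 * init_hw_mutex() - read the hardware mutex block geometry from devicetree.
 *
 * Populates reg_base/reg_size from the node's first "reg" entry and
 * lock_count from the "qcom,num-locks" property. The locks are assumed to
 * be packed contiguously, so lock_size is simply reg_size / lock_count.
 */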
static int init_hw_mutex(struct device_node *node)
{
	struct resource r;
	int rc;

	rc = of_address_to_resource(node, 0, &r);
	if (rc)
		BUG();

	rc = of_property_read_u32(node, "qcom,num-locks", &lock_count);
	if (rc)
		BUG();

	reg_base = r.start;
	reg_size = (uint32_t)(resource_size(&r));
	lock_offset = 0;
	lock_size = reg_size / lock_count;

	return 0;
}

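/*
 * find_and_init_hw_mutex() - locate and map the hardware mutex block.
 *
 * Prefers a devicetree node matching "qcom,ipc-spinlock"; if none is
 * found, falls back to the legacy hard-coded SFPB register layout.
 * The register block is then ioremap()ed for use by all callers.
 */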
static void find_and_init_hw_mutex(void)
{
	struct device_node *node;

	node = of_find_compatible_node(NULL, NULL, compatible_string);
	if (node) {
		init_hw_mutex(node);
	} else {
		lock_count = SFPB_SPINLOCK_COUNT;
		reg_base = MSM_SFPB_MUTEX_REG_BASE;
		reg_size = MSM_SFPB_MUTEX_REG_SIZE;
		lock_offset = SFPB_SPINLOCK_OFFSET;
		lock_size = SFPB_SPINLOCK_SIZE;
	}
	hw_mutex_reg_base = ioremap(reg_base, reg_size);
	BUG_ON(hw_mutex_reg_base == NULL);
}

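/*
 * remote_spinlock_init_address() - compute the virtual address of lock @id.
 *
 * Maps the hardware mutex block on first use (see the locking note inside)
 * and returns, in *lock, the address of the lock register located at
 * lock_offset + id * lock_size within the mapped block.
 */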
static int remote_spinlock_init_address(int id, _remote_spinlock_t *lock)
{
	/*
	 * Optimistic locking.  Init only needs to be done once by the first
	 * caller.  After that, serializing inits between different callers
	 * is unnecessary.  The second check after the lock ensures init
	 * wasn't previously completed by someone else before the lock could
	 * be grabbed.
	 */
	if (!hw_mutex_reg_base) {
		mutex_lock(&hw_map_init_lock);
		if (!hw_mutex_reg_base)
			find_and_init_hw_mutex();
		mutex_unlock(&hw_map_init_lock);
	}

	if (id >= lock_count)
		return -EINVAL;

	*lock = hw_mutex_reg_base + lock_offset + id * lock_size;
	return 0;
}

void _remote_spin_release_all(uint32_t pid)
{
	remote_spin_release_all_locks(pid, lock_count);
}

#else
#define SMEM_SPINLOCK_COUNT 8
#define SMEM_SPINLOCK_ARRAY_SIZE (SMEM_SPINLOCK_COUNT * sizeof(uint32_t))

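/*
 * remote_spinlock_init_address() - shared-memory (SMEM) implementation.
 *
 * The locks live in a fixed SMEM array (SMEM_SPINLOCK_ARRAY); this obtains
 * that array via smem_alloc() and returns the address of entry @id.
 */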
static int remote_spinlock_init_address(int id, _remote_spinlock_t *lock)
{
	_remote_spinlock_t spinlock_start;

	if (id >= SMEM_SPINLOCK_COUNT)
		return -EINVAL;

	spinlock_start = smem_alloc(SMEM_SPINLOCK_ARRAY,
				    SMEM_SPINLOCK_ARRAY_SIZE);
	if (spinlock_start == NULL)
		return -ENXIO;

	*lock = spinlock_start + id;

	return 0;
}

void _remote_spin_release_all(uint32_t pid)
{
	remote_spin_release_all_locks(pid, SMEM_SPINLOCK_COUNT);
}

#endif


/**
 * remote_spin_release_all_locks() - release all spinlocks owned by @pid.
 *
 * This is only to be used for situations where the processor owning
 * spinlocks has crashed and the spinlocks must be released.
 *
 * @pid: processor ID of the processor to release
 * @count: number of locks to scan
 */
static void remote_spin_release_all_locks(uint32_t pid, int count)
{
	int n;
	_remote_spinlock_t lock;

	for (n = 0; n < count; ++n) {
		if (remote_spinlock_init_address(n, &lock) == 0)
			_remote_spin_release(&lock, pid);
	}
}

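/*
 * remote_spinlock_dal_init() - look up a DAL-managed remote lock by name.
 *
 * Walks the chunk headers in the SMEM_DAL_AREA region, starting at the
 * first 4 KB-aligned address, and returns the lock embedded in the chunk
 * whose name matches @chunk_name.
 */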
static int
remote_spinlock_dal_init(const char *chunk_name, _remote_spinlock_t *lock)
{
	void *dal_smem_start, *dal_smem_end;
	uint32_t dal_smem_size;
	struct dal_chunk_header *cur_header;

	if (!chunk_name)
		return -EINVAL;

	dal_smem_start = smem_get_entry(SMEM_DAL_AREA, &dal_smem_size);
	if (!dal_smem_start)
		return -ENXIO;

	dal_smem_end = dal_smem_start + dal_smem_size;

	/* Find first chunk header */
	cur_header = (struct dal_chunk_header *)
		(((uint32_t)dal_smem_start + (4095)) & ~4095);
	*lock = NULL;
	while (cur_header->size != 0
		&& ((uint32_t)(cur_header + 1) < (uint32_t)dal_smem_end)) {

		/* Check if chunk name matches */
		if (!strncmp(cur_header->name, chunk_name,
						DAL_CHUNK_NAME_LENGTH)) {
			*lock = (_remote_spinlock_t)&cur_header->lock;
			return 0;
		}
		cur_header = (void *)cur_header + cur_header->size;
	}

	pr_err("%s: DAL remote lock \"%s\" not found.\n", __func__,
		chunk_name);
	return -EINVAL;
}

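/*
 * _remote_spin_lock_init() - resolve a lock ID string to a lock address.
 *
 * Two ID formats are accepted: "D:<name>" selects a DAL chunk lock by
 * name, and "S:<n>" selects lock <n> (a single decimal digit) from the
 * SFPB/SMEM lock array.
 */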
int _remote_spin_lock_init(remote_spinlock_id_t id, _remote_spinlock_t *lock)
{
	BUG_ON(id == NULL);

	if (id[0] == 'D' && id[1] == ':') {
		/* DAL chunk name starts after "D:" */
		return remote_spinlock_dal_init(&id[2], lock);
	} else if (id[0] == 'S' && id[1] == ':') {
		/* Single-digit lock ID follows "S:" */
		BUG_ON(id[3] != '\0');

		return remote_spinlock_init_address((((uint8_t)id[2]) - '0'),
							lock);
	} else {
		return -EINVAL;
	}
}
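
/*
 * Illustrative usage (a sketch; callers normally go through the wrappers
 * in <mach/remote_spinlock.h> rather than calling this directly):
 *
 *	_remote_spinlock_t hw_lock;
 *
 *	if (_remote_spin_lock_init("S:3", &hw_lock) == 0)
 *		... hw_lock now points at SFPB/SMEM lock 3 ...
 *
 * A DAL-managed lock would instead use an ID such as "D:<chunk name>".
 */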

int _remote_mutex_init(struct remote_mutex_id *id, _remote_mutex_t *lock)
{
	BUG_ON(id == NULL);

	lock->delay_us = id->delay_us;
	return _remote_spin_lock_init(id->r_spinlock_id, &(lock->r_spinlock));
}
EXPORT_SYMBOL(_remote_mutex_init);

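/*
 * _remote_mutex_lock() - acquire the underlying remote spinlock, backing
 * off for delay_us between attempts (msleep() for delays of 1 ms or more,
 * udelay() otherwise) instead of spinning continuously.
 */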
void _remote_mutex_lock(_remote_mutex_t *lock)
{
	while (!_remote_spin_trylock(&(lock->r_spinlock))) {
		if (lock->delay_us >= 1000)
			msleep(lock->delay_us/1000);
		else
			udelay(lock->delay_us);
	}
}
EXPORT_SYMBOL(_remote_mutex_lock);

void _remote_mutex_unlock(_remote_mutex_t *lock)
{
	_remote_spin_unlock(&(lock->r_spinlock));
}
EXPORT_SYMBOL(_remote_mutex_unlock);

int _remote_mutex_trylock(_remote_mutex_t *lock)
{
	return _remote_spin_trylock(&(lock->r_spinlock));
}
EXPORT_SYMBOL(_remote_mutex_trylock);