/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/printk.h>

#include <mach/board.h>
#include <mach/msm_iomap.h>
#include <mach/msm_smem.h>
#include <mach/ramdump.h>
#include <mach/subsystem_notif.h>

#include "smem_private.h"

/**
 * OVERFLOW_ADD_UNSIGNED() - check for unsigned overflow
 *
 * @type: type to check for overflow
 * @a: left value to use
 * @b: right value to use
 * @returns: true if a + b will result in overflow; false otherwise
 */
#define OVERFLOW_ADD_UNSIGNED(type, a, b) \
	(((type)~0 - (a)) < (b) ? true : false)
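
/*
 * Worked illustration only (not part of the driver logic): assuming a 32-bit
 * uint32_t, OVERFLOW_ADD_UNSIGNED(uint32_t, 0xfffffff0, 0x20) is true because
 * 0xfffffff0 + 0x20 cannot be represented in 32 bits, while
 * OVERFLOW_ADD_UNSIGNED(uint32_t, 0xfffffff0, 0x0f) is false.
 */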

enum {
	MSM_SMEM_DEBUG = 1U << 0,
	MSM_SMEM_INFO = 1U << 1,
};

static int msm_smem_debug_mask;
module_param_named(debug_mask, msm_smem_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#define SMEM_DBG(x...) do { \
		if (msm_smem_debug_mask & MSM_SMEM_DEBUG) \
			pr_debug(x); \
	} while (0)
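
/*
 * Debug logging is off by default.  As a sketch of typical usage (the exact
 * sysfs path depends on how this object is built and named; "msm_smem" is
 * assumed here), the mask can be raised at runtime with something like:
 *
 *   echo 1 > /sys/module/msm_smem/parameters/debug_mask
 *
 * after which SMEM_DBG() forwards its arguments to pr_debug().  Whether the
 * output actually appears may additionally depend on dynamic debug settings.
 */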

remote_spinlock_t remote_spinlock;
int spinlocks_initialized;
uint32_t num_smem_areas;
struct smem_area *smem_areas;
struct ramdump_segment *smem_ramdump_segments;

static void *smem_ramdump_dev;
static DEFINE_MUTEX(spinlock_init_lock);

struct restart_notifier_block {
	unsigned processor;
	char *name;
	struct notifier_block nb;
};

static int restart_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);

static struct restart_notifier_block restart_notifiers[] = {
	{SMEM_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
	{SMEM_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
	{SMEM_WCNSS, "wcnss", .nb.notifier_call = restart_notifier_cb},
	{SMEM_DSPS, "dsps", .nb.notifier_call = restart_notifier_cb},
	{SMEM_MODEM, "gss", .nb.notifier_call = restart_notifier_cb},
	{SMEM_Q6, "adsp", .nb.notifier_call = restart_notifier_cb},
};

/**
 * smem_phys_to_virt() - Convert a physical base and offset to virtual address
 *
 * @base: physical base address to check
 * @offset: offset from the base to get the final address
 * @returns: virtual SMEM address; NULL for failure
 *
 * Takes a physical address and an offset and checks if the resulting physical
 * address would fit into one of the smem regions. If so, returns the
 * corresponding virtual address. Otherwise returns NULL.
 */
static void *smem_phys_to_virt(phys_addr_t base, unsigned offset)
{
	int i;
	phys_addr_t phys_addr;
	resource_size_t size;

	if (OVERFLOW_ADD_UNSIGNED(phys_addr_t, base, offset))
		return NULL;

	if (!smem_areas) {
		/*
		 * Early boot - no area configuration yet, so default
		 * to using the main memory region.
		 *
		 * To remove the MSM_SHARED_RAM_BASE and the static
		 * mapping of SMEM in the future, add dump_stack()
		 * to identify the early callers of smem_get_entry()
		 * (which calls this function) and replace those calls
		 * with a new function that knows how to lookup the
		 * SMEM base address before SMEM has been probed.
		 */
		phys_addr = msm_shared_ram_phys;
		size = MSM_SHARED_RAM_SIZE;

		if (base >= phys_addr && base + offset < phys_addr + size) {
			if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
				(uintptr_t)MSM_SHARED_RAM_BASE, offset)) {
				pr_err("%s: overflow %p %x\n", __func__,
					MSM_SHARED_RAM_BASE, offset);
				return NULL;
			}

			return MSM_SHARED_RAM_BASE + offset;
		} else {
			return NULL;
		}
	}
	for (i = 0; i < num_smem_areas; ++i) {
		phys_addr = smem_areas[i].phys_addr;
		size = smem_areas[i].size;

		if (base < phys_addr || base + offset >= phys_addr + size)
			continue;

		if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
			(uintptr_t)smem_areas[i].virt_addr, offset)) {
			pr_err("%s: overflow %p %x\n", __func__,
				smem_areas[i].virt_addr, offset);
			return NULL;
		}

		return smem_areas[i].virt_addr + offset;
	}

	return NULL;
}
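
/*
 * Illustration only, with made-up addresses: if one configured SMEM area has
 * phys_addr 0x80000000, size 0x200000 and virt_addr 0xfa000000, then
 * smem_phys_to_virt(0x80000000, 0x1000) returns (void *)0xfa001000, while
 * smem_phys_to_virt(0x80000000, 0x200000) returns NULL because the result
 * would fall just past the end of the area.
 */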

/**
 * smem_virt_to_phys() - Convert SMEM address to physical address.
 *
 * @smem_address: Address of SMEM item (returned by smem_alloc(), etc)
 * @returns: Physical address (or 0 if there is a failure)
 *
 * This function should only be used if an SMEM item needs to be handed
 * off to a DMA engine.
 */
phys_addr_t smem_virt_to_phys(void *smem_address)
{
	phys_addr_t phys_addr = 0;
	int i;
	void *vend;

	if (!smem_areas)
		return phys_addr;

	for (i = 0; i < num_smem_areas; ++i) {
		vend = (void *)(smem_areas[i].virt_addr + smem_areas[i].size);

		if (smem_address >= smem_areas[i].virt_addr &&
				smem_address < vend) {
			phys_addr = smem_address - smem_areas[i].virt_addr;
			phys_addr += smem_areas[i].phys_addr;
			break;
		}
	}

	return phys_addr;
}
EXPORT_SYMBOL(smem_virt_to_phys);
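
/*
 * Sketch of the intended DMA hand-off pattern (caller-side code, not part of
 * this driver; the item ID, size, and program_dma_engine() helper are purely
 * illustrative):
 *
 *   void *item = smem_alloc2(SMEM_ID_VENDOR0, 64);
 *   phys_addr_t pa = item ? smem_virt_to_phys(item) : 0;
 *   if (pa)
 *           program_dma_engine(pa);
 *
 * A return value of 0 means the address was not inside a known SMEM area.
 */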

/* smem_alloc returns a pointer to the smem item if it is already allocated.
 * Otherwise, it returns NULL.
 */
void *smem_alloc(unsigned id, unsigned size)
{
	return smem_find(id, size);
}
EXPORT_SYMBOL(smem_alloc);

void *smem_find(unsigned id, unsigned size_in)
{
	unsigned size;
	void *ptr;

	ptr = smem_get_entry(id, &size);
	if (!ptr)
		return 0;

	size_in = ALIGN(size_in, 8);
	if (size_in != size) {
		pr_err("smem_find(%d, %d): wrong size %d\n",
			id, size_in, size);
		return 0;
	}

	return ptr;
}
EXPORT_SYMBOL(smem_find);

/* smem_alloc2 returns a pointer to the smem item. If the item has not been
 * allocated yet, it is allocated first and a pointer to it is returned.
 */
void *smem_alloc2(unsigned id, unsigned size_in)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	struct smem_heap_entry *toc = shared->heap_toc;
	unsigned long flags;
	void *ret = NULL;
	int rc;

	if (!shared->heap_info.initialized) {
		pr_err("%s: smem heap info not initialized\n", __func__);
		return NULL;
	}

	if (id >= SMEM_NUM_ITEMS)
		return NULL;

	if (unlikely(!spinlocks_initialized)) {
		rc = init_smem_remote_spinlock();
		if (unlikely(rc)) {
			pr_err("%s: remote spinlock init failed %d\n",
				__func__, rc);
			return NULL;
		}
	}

	size_in = ALIGN(size_in, 8);
	remote_spin_lock_irqsave(&remote_spinlock, flags);
	if (toc[id].allocated) {
		SMEM_DBG("%s: %u already allocated\n", __func__, id);
		if (size_in != toc[id].size)
			pr_err("%s: wrong size %u (expected %u)\n",
				__func__, toc[id].size, size_in);
		else
			ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
	} else if (id > SMEM_FIXED_ITEM_LAST) {
		SMEM_DBG("%s: allocating %u\n", __func__, id);
		if (shared->heap_info.heap_remaining >= size_in) {
			toc[id].offset = shared->heap_info.free_offset;
			toc[id].size = size_in;
			wmb();
			toc[id].allocated = 1;

			shared->heap_info.free_offset += size_in;
			shared->heap_info.heap_remaining -= size_in;
			ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
		} else
			pr_err("%s: not enough memory %u (required %u)\n",
				__func__, shared->heap_info.heap_remaining,
				size_in);
	}
	wmb();
	remote_spin_unlock_irqrestore(&remote_spinlock, flags);
	return ret;
}
EXPORT_SYMBOL(smem_alloc2);
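
/*
 * Caller-side sketch (the item ID and struct are illustrative only): requested
 * sizes are rounded up to an 8-byte multiple, and later lookups via smem_find()
 * must request the same size or they are rejected.
 *
 *   struct my_shared_data *d = smem_alloc2(SMEM_ID_VENDOR0, sizeof(*d));
 *   if (!d)
 *           return -ENOMEM;
 *
 * A NULL return can mean the heap is exhausted, the id is invalid or fixed,
 * or an existing item of a different size already occupies the slot.
 */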

void *smem_get_entry(unsigned id, unsigned *size)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	struct smem_heap_entry *toc = shared->heap_toc;
	int use_spinlocks = spinlocks_initialized;
	void *ret = 0;
	unsigned long flags = 0;

	if (id >= SMEM_NUM_ITEMS)
		return ret;

	if (use_spinlocks)
		remote_spin_lock_irqsave(&remote_spinlock, flags);
	/* toc is in device memory and cannot be speculatively accessed */
	if (toc[id].allocated) {
		phys_addr_t phys_base;

		*size = toc[id].size;
		barrier();

		phys_base = toc[id].reserved & BASE_ADDR_MASK;
		if (!phys_base)
			phys_base = (phys_addr_t)msm_shared_ram_phys;
		ret = smem_phys_to_virt(phys_base, toc[id].offset);
	} else {
		*size = 0;
	}
	if (use_spinlocks)
		remote_spin_unlock_irqrestore(&remote_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(smem_get_entry);
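
/*
 * Caller-side sketch (illustrative item ID): unlike smem_find(), which rejects
 * a size mismatch, smem_get_entry() reports the allocated size back to the
 * caller instead.
 *
 *   unsigned size;
 *   void *item = smem_get_entry(SMEM_ID_VENDOR0, &size);
 *   if (item)
 *           pr_info("item is %u bytes\n", size);
 */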

/**
 * smem_get_remote_spinlock - Remote spinlock pointer for unit testing.
 *
 * @returns: pointer to SMEM remote spinlock
 */
remote_spinlock_t *smem_get_remote_spinlock(void)
{
	return &remote_spinlock;
}
EXPORT_SYMBOL(smem_get_remote_spinlock);

/**
 * init_smem_remote_spinlock - Reentrant remote spinlock initialization
 *
 * @returns: 0 on success or an error code on failure
 */
int init_smem_remote_spinlock(void)
{
	int rc = 0;

	/*
	 * Optimistic locking. Init only needs to be done once by the first
	 * caller. After that, serializing inits between different callers
	 * is unnecessary. The second check after the lock ensures init
	 * wasn't previously completed by someone else before the lock could
	 * be grabbed.
	 */
	if (!spinlocks_initialized) {
		mutex_lock(&spinlock_init_lock);
		if (!spinlocks_initialized) {
			rc = remote_spin_lock_init(&remote_spinlock,
						SMEM_SPINLOCK_SMEM_ALLOC);
			if (!rc)
				spinlocks_initialized = 1;
		}
		mutex_unlock(&spinlock_init_lock);
	}
	return rc;
}

static int restart_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data)
{
	if (code == SUBSYS_AFTER_SHUTDOWN) {
		struct restart_notifier_block *notifier;

		notifier = container_of(this,
					struct restart_notifier_block, nb);
		SMEM_DBG("%s: ssrestart for processor %d ('%s')\n",
				__func__, notifier->processor,
				notifier->name);

		remote_spin_release(&remote_spinlock, notifier->processor);
		remote_spin_release_all(notifier->processor);

		if (smem_ramdump_dev) {
			int ret;

			SMEM_DBG("%s: saving ramdump\n", __func__);
			/*
			 * XPU protection does not currently allow the
			 * auxiliary memory regions to be dumped. If this
			 * changes, then num_smem_areas + 1 should be passed
			 * into do_elf_ramdump() to dump all regions.
			 */
			ret = do_elf_ramdump(smem_ramdump_dev,
					smem_ramdump_segments, 1);
			if (ret < 0)
				pr_err("%s: unable to dump smem %d\n", __func__,
					ret);
		}
	}

	return NOTIFY_DONE;
}

static __init int modem_restart_late_init(void)
{
	int i;
	void *handle;
	struct restart_notifier_block *nb;

	smem_ramdump_dev = create_ramdump_device("smem", NULL);
	if (IS_ERR_OR_NULL(smem_ramdump_dev)) {
		pr_err("%s: Unable to create smem ramdump device.\n",
			__func__);
		smem_ramdump_dev = NULL;
	}

	for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
		nb = &restart_notifiers[i];
		handle = subsys_notif_register_notifier(nb->name, &nb->nb);
		SMEM_DBG("%s: registering notif for '%s', handle=%p\n",
			__func__, nb->name, handle);
	}

	return 0;
}
late_initcall(modem_restart_late_init);