/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/printk.h>

#include <mach/board.h>
#include <mach/msm_iomap.h>
#include <mach/msm_smem.h>
#include <mach/ramdump.h>
#include <mach/subsystem_notif.h>

#include "smem_private.h"

/**
 * OVERFLOW_ADD_UNSIGNED() - check for unsigned overflow
 *
 * @type: type to check for overflow
 * @a: left value to use
 * @b: right value to use
 * @returns: true if a + b will result in overflow; false otherwise
 */
#define OVERFLOW_ADD_UNSIGNED(type, a, b) \
	(((type)~0 - (a)) < (b) ? true : false)
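/*
 * Worked example (illustrative only, not part of the driver logic): for
 * an 8-bit unsigned type, (type)~0 is 255, so
 * OVERFLOW_ADD_UNSIGNED(uint8_t, 250, 10) evaluates (255 - 250) < 10,
 * i.e. 5 < 10, which is true: 250 + 10 = 260 does not fit in 8 bits.
 * OVERFLOW_ADD_UNSIGNED(uint8_t, 250, 5) is false, since 255 still fits.
 */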

#define MODEM_SBL_VERSION_INDEX 7
#define SMEM_VERSION_INFO_SIZE (32 * 4)
#define SMEM_VERSION 0x000B

enum {
	MSM_SMEM_DEBUG = 1U << 0,
	MSM_SMEM_INFO = 1U << 1,
};

static int msm_smem_debug_mask;
module_param_named(debug_mask, msm_smem_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#define SMEM_DBG(x...) do {					\
		if (msm_smem_debug_mask & MSM_SMEM_DEBUG)	\
			pr_debug(x);				\
	} while (0)
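
/*
 * debug_mask is writable at runtime (S_IWUSR above).  For a built-in
 * driver the parameter normally appears under sysfs; the exact path
 * below is an assumption for illustration:
 *
 *	echo 1 > /sys/module/smem/parameters/debug_mask
 */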

remote_spinlock_t remote_spinlock;
int spinlocks_initialized;
uint32_t num_smem_areas;
struct smem_area *smem_areas;
struct ramdump_segment *smem_ramdump_segments;

static void *smem_ramdump_dev;
static DEFINE_MUTEX(spinlock_init_lock);
static DEFINE_SPINLOCK(smem_init_check_lock);

struct restart_notifier_block {
	unsigned processor;
	char *name;
	struct notifier_block nb;
};

static int restart_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);

static struct restart_notifier_block restart_notifiers[] = {
	{SMEM_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
	{SMEM_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
	{SMEM_WCNSS, "wcnss", .nb.notifier_call = restart_notifier_cb},
	{SMEM_DSPS, "dsps", .nb.notifier_call = restart_notifier_cb},
	{SMEM_MODEM, "gss", .nb.notifier_call = restart_notifier_cb},
	{SMEM_Q6, "adsp", .nb.notifier_call = restart_notifier_cb},
};

/**
 * smem_phys_to_virt() - Convert a physical base and offset to virtual address
 *
 * @base: physical base address to check
 * @offset: offset from the base to get the final address
 * @returns: virtual SMEM address; NULL for failure
 *
 * Takes a physical address and an offset and checks if the resulting physical
 * address would fit into one of the smem regions.  If so, returns the
 * corresponding virtual address.  Otherwise returns NULL.
 */
static void *smem_phys_to_virt(phys_addr_t base, unsigned offset)
{
	int i;
	phys_addr_t phys_addr;
	resource_size_t size;

	if (OVERFLOW_ADD_UNSIGNED(phys_addr_t, base, offset))
		return NULL;

	if (!smem_areas) {
		/*
		 * Early boot - no area configuration yet, so default
		 * to using the main memory region.
		 *
		 * To remove the MSM_SHARED_RAM_BASE and the static
		 * mapping of SMEM in the future, add dump_stack()
		 * to identify the early callers of smem_get_entry()
		 * (which calls this function) and replace those calls
		 * with a new function that knows how to lookup the
		 * SMEM base address before SMEM has been probed.
		 */
		phys_addr = msm_shared_ram_phys;
		size = MSM_SHARED_RAM_SIZE;

		if (base >= phys_addr && base + offset < phys_addr + size) {
			if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
				(uintptr_t)MSM_SHARED_RAM_BASE, offset)) {
				pr_err("%s: overflow %p %x\n", __func__,
					MSM_SHARED_RAM_BASE, offset);
				return NULL;
			}

			return MSM_SHARED_RAM_BASE + offset;
		} else {
			return NULL;
		}
	}

	for (i = 0; i < num_smem_areas; ++i) {
		phys_addr = smem_areas[i].phys_addr;
		size = smem_areas[i].size;

		if (base < phys_addr || base + offset >= phys_addr + size)
			continue;

		if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
			(uintptr_t)smem_areas[i].virt_addr, offset)) {
			pr_err("%s: overflow %p %x\n", __func__,
				smem_areas[i].virt_addr, offset);
			return NULL;
		}

		return smem_areas[i].virt_addr + offset;
	}

	return NULL;
}

/**
 * smem_virt_to_phys() - Convert SMEM address to physical address.
 *
 * @smem_address: Address of SMEM item (returned by smem_alloc(), etc)
 * @returns: Physical address (or NULL if there is a failure)
 *
 * This function should only be used if an SMEM item needs to be handed
 * off to a DMA engine.
 */
phys_addr_t smem_virt_to_phys(void *smem_address)
{
	phys_addr_t phys_addr = 0;
	int i;
	void *vend;

	if (!smem_areas)
		return phys_addr;

	for (i = 0; i < num_smem_areas; ++i) {
		vend = (void *)(smem_areas[i].virt_addr + smem_areas[i].size);

		if (smem_address >= smem_areas[i].virt_addr &&
				smem_address < vend) {
			phys_addr = smem_address - smem_areas[i].virt_addr;
			phys_addr += smem_areas[i].phys_addr;
			break;
		}
	}

	return phys_addr;
}
EXPORT_SYMBOL(smem_virt_to_phys);
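
/*
 * Example of the intended DMA hand-off (a minimal sketch, not code in
 * this driver; the item ID and the program_dma_engine() helper are
 * assumptions for illustration only):
 *
 *	unsigned size;
 *	void *item = smem_get_entry(SMEM_ID_VENDOR0, &size);
 *	phys_addr_t pa;
 *
 *	if (item) {
 *		pa = smem_virt_to_phys(item);
 *		if (pa)
 *			program_dma_engine(pa, size);
 *	}
 */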

/* smem_alloc() returns a pointer to the SMEM item if it is already
 * allocated.  Otherwise, it returns NULL.
 */
void *smem_alloc(unsigned id, unsigned size)
{
	return smem_find(id, size);
}
EXPORT_SYMBOL(smem_alloc);

static void *__smem_get_entry(unsigned id, unsigned *size,
				bool skip_init_check)
{
	struct smem_shared *shared = (void *)MSM_SHARED_RAM_BASE;
	struct smem_heap_entry *toc = shared->heap_toc;
	int use_spinlocks = spinlocks_initialized;
	void *ret = NULL;
	unsigned long flags = 0;

	if (!skip_init_check && !smem_initialized_check())
		return ret;

	if (id >= SMEM_NUM_ITEMS)
		return ret;

	if (use_spinlocks)
		remote_spin_lock_irqsave(&remote_spinlock, flags);
	/* toc is in device memory and cannot be speculatively accessed */
	if (toc[id].allocated) {
		phys_addr_t phys_base;

		*size = toc[id].size;
		barrier();

		phys_base = toc[id].reserved & BASE_ADDR_MASK;
		if (!phys_base)
			phys_base = (phys_addr_t)msm_shared_ram_phys;
		ret = smem_phys_to_virt(phys_base, toc[id].offset);
	} else {
		*size = 0;
	}
	if (use_spinlocks)
		remote_spin_unlock_irqrestore(&remote_spinlock, flags);

	return ret;
}

static void *__smem_find(unsigned id, unsigned size_in, bool skip_init_check)
{
	unsigned size;
	void *ptr;

	ptr = __smem_get_entry(id, &size, skip_init_check);
	if (!ptr)
		return NULL;

	size_in = ALIGN(size_in, 8);
	if (size_in != size) {
		pr_err("smem_find(%d, %d): wrong size %d\n",
			id, size_in, size);
		return NULL;
	}

	return ptr;
}

void *smem_find(unsigned id, unsigned size_in)
{
	return __smem_find(id, size_in, false);
}
EXPORT_SYMBOL(smem_find);
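
/*
 * Example use of smem_find() (a minimal sketch; the item ID and struct
 * layout are assumptions, not fixed by this driver):
 *
 *	struct my_shared_state *state;
 *
 *	state = smem_find(SMEM_ID_VENDOR0, sizeof(*state));
 *	if (!state)
 *		return -ENODEV;
 *
 * A NULL return means the item is not yet allocated or the size does
 * not match; note the requested size is rounded up to an 8-byte
 * multiple before the comparison.
 */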

/* smem_alloc2() returns a pointer to the SMEM item.  If the item is not
 * yet allocated, it allocates it and then returns the pointer.
 */
void *smem_alloc2(unsigned id, unsigned size_in)
{
	struct smem_shared *shared = (void *)MSM_SHARED_RAM_BASE;
	struct smem_heap_entry *toc = shared->heap_toc;
	unsigned long flags;
	void *ret = NULL;
	int rc;

	if (!smem_initialized_check())
		return NULL;

	if (id >= SMEM_NUM_ITEMS)
		return NULL;

	if (unlikely(!spinlocks_initialized)) {
		rc = init_smem_remote_spinlock();
		if (unlikely(rc)) {
			pr_err("%s: remote spinlock init failed %d\n",
				__func__, rc);
			return NULL;
		}
	}

	size_in = ALIGN(size_in, 8);
	remote_spin_lock_irqsave(&remote_spinlock, flags);
	if (toc[id].allocated) {
		SMEM_DBG("%s: %u already allocated\n", __func__, id);
		if (size_in != toc[id].size)
			pr_err("%s: wrong size %u (expected %u)\n",
				__func__, toc[id].size, size_in);
		else
			ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
	} else if (id > SMEM_FIXED_ITEM_LAST) {
		SMEM_DBG("%s: allocating %u\n", __func__, id);
		if (shared->heap_info.heap_remaining >= size_in) {
			toc[id].offset = shared->heap_info.free_offset;
			toc[id].size = size_in;
			wmb();
			toc[id].allocated = 1;

			shared->heap_info.free_offset += size_in;
			shared->heap_info.heap_remaining -= size_in;
			ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
		} else {
			pr_err("%s: not enough memory %u (required %u)\n",
				__func__, shared->heap_info.heap_remaining,
				size_in);
		}
	}
	wmb();
	remote_spin_unlock_irqrestore(&remote_spinlock, flags);
	return ret;
}
EXPORT_SYMBOL(smem_alloc2);
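
/*
 * Example use of smem_alloc2() (a minimal sketch; the item ID and
 * struct are assumptions for illustration).  Get-or-allocate semantics
 * mean a second caller with the same ID and size receives the same
 * pointer:
 *
 *	struct my_shared_state *state;
 *
 *	state = smem_alloc2(SMEM_ID_VENDOR0, sizeof(*state));
 *	if (!state)
 *		return -ENOMEM;
 *
 * NULL is returned when SMEM is uninitialized, the ID is out of range
 * or a fixed item, the heap is exhausted, or an existing item's size
 * differs from the one requested.
 */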

void *smem_get_entry(unsigned id, unsigned *size)
{
	return __smem_get_entry(id, size, false);
}
EXPORT_SYMBOL(smem_get_entry);
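
/*
 * Example use of smem_get_entry() (a minimal sketch; the item ID and
 * the parse_log_region() consumer are assumptions).  Unlike
 * smem_find(), the allocated size is reported back instead of being
 * checked against an expected value, which suits variable-length items:
 *
 *	unsigned size;
 *	void *log = smem_get_entry(SMEM_ID_VENDOR1, &size);
 *
 *	if (log)
 *		parse_log_region(log, size);
 */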

/**
 * smem_get_remote_spinlock - Remote spinlock pointer for unit testing.
 *
 * @returns: pointer to SMEM remote spinlock
 */
remote_spinlock_t *smem_get_remote_spinlock(void)
{
	return &remote_spinlock;
}
EXPORT_SYMBOL(smem_get_remote_spinlock);

/**
 * init_smem_remote_spinlock - Reentrant remote spinlock initialization
 *
 * @returns: success or error code for failure
 */
int init_smem_remote_spinlock(void)
{
	int rc = 0;

	/*
	 * Optimistic locking.  Init only needs to be done once by the first
	 * caller.  After that, serializing inits between different callers
	 * is unnecessary.  The second check after the lock ensures init
	 * wasn't previously completed by someone else before the lock could
	 * be grabbed.
	 */
	if (!spinlocks_initialized) {
		mutex_lock(&spinlock_init_lock);
		if (!spinlocks_initialized) {
			rc = remote_spin_lock_init(&remote_spinlock,
				SMEM_SPINLOCK_SMEM_ALLOC);
			if (!rc)
				spinlocks_initialized = 1;
		}
		mutex_unlock(&spinlock_init_lock);
	}
	return rc;
}

/**
 * smem_initialized_check - Reentrant check that smem has been initialized
 *
 * @returns: true if initialized, false if not.
 */
bool smem_initialized_check(void)
{
	static int checked;
	static int is_inited;
	unsigned long flags;
	struct smem_shared *smem;
	int *version_array;

	if (likely(checked)) {
		if (unlikely(!is_inited))
			pr_err("%s: smem not initialized\n", __func__);
		return is_inited;
	}

	spin_lock_irqsave(&smem_init_check_lock, flags);
	if (checked) {
		spin_unlock_irqrestore(&smem_init_check_lock, flags);
		if (unlikely(!is_inited))
			pr_err("%s: smem not initialized\n", __func__);
		return is_inited;
	}

	smem = (void *)MSM_SHARED_RAM_BASE;

	if (smem->heap_info.initialized != 1)
		goto failed;
	if (smem->heap_info.reserved != 0)
		goto failed;

	version_array = __smem_find(SMEM_VERSION_INFO, SMEM_VERSION_INFO_SIZE,
			true);
	if (version_array == NULL)
		goto failed;
	if (version_array[MODEM_SBL_VERSION_INDEX] != SMEM_VERSION << 16)
		goto failed;

	is_inited = 1;
	checked = 1;
	spin_unlock_irqrestore(&smem_init_check_lock, flags);
	return is_inited;

failed:
	is_inited = 0;
	checked = 1;
	spin_unlock_irqrestore(&smem_init_check_lock, flags);
	pr_err("%s: bootloader failure detected, shared memory not inited\n",
		__func__);
	return is_inited;
}
EXPORT_SYMBOL(smem_initialized_check);

static int restart_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data)
{
	if (code == SUBSYS_AFTER_SHUTDOWN) {
		struct restart_notifier_block *notifier;

		notifier = container_of(this,
				struct restart_notifier_block, nb);
		SMEM_DBG("%s: ssrestart for processor %d ('%s')\n",
				__func__, notifier->processor,
				notifier->name);

		remote_spin_release(&remote_spinlock, notifier->processor);
		remote_spin_release_all(notifier->processor);

		if (smem_ramdump_dev) {
			int ret;

			SMEM_DBG("%s: saving ramdump\n", __func__);
			/*
			 * XPU protection does not currently allow the
			 * auxiliary memory regions to be dumped.  If this
			 * changes, then num_smem_areas + 1 should be passed
			 * into do_elf_ramdump() to dump all regions.
			 */
			ret = do_elf_ramdump(smem_ramdump_dev,
					smem_ramdump_segments, 1);
			if (ret < 0)
				pr_err("%s: unable to dump smem %d\n",
					__func__, ret);
		}
	}

	return NOTIFY_DONE;
}

static __init int modem_restart_late_init(void)
{
	int i;
	void *handle;
	struct restart_notifier_block *nb;

	smem_ramdump_dev = create_ramdump_device("smem", NULL);
	if (IS_ERR_OR_NULL(smem_ramdump_dev)) {
		pr_err("%s: Unable to create smem ramdump device.\n",
			__func__);
		smem_ramdump_dev = NULL;
	}

	for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
		nb = &restart_notifiers[i];
		handle = subsys_notif_register_notifier(nb->name, &nb->nb);
		SMEM_DBG("%s: registering notif for '%s', handle=%p\n",
				__func__, nb->name, handle);
	}

	return 0;
}
late_initcall(modem_restart_late_init);