/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/printk.h>
#include <linux/notifier.h>
#include <linux/of.h>

#include <mach/board.h>
#include <mach/msm_iomap.h>
#include <mach/msm_smem.h>
#include <mach/ramdump.h>
#include <mach/subsystem_notif.h>
#include <mach/msm_ipc_logging.h>

#include "smem_private.h"

/**
 * OVERFLOW_ADD_UNSIGNED() - check for unsigned overflow
 *
 * @type: type to check for overflow
 * @a: left value to use
 * @b: right value to use
 * @returns: true if a + b will result in overflow; false otherwise
 */
#define OVERFLOW_ADD_UNSIGNED(type, a, b) \
	(((type)~0 - (a)) < (b) ? true : false)

#define MODEM_SBL_VERSION_INDEX 7
#define SMEM_VERSION_INFO_SIZE (32 * 4)
#define SMEM_VERSION 0x000B

enum {
	MSM_SMEM_DEBUG = 1U << 0,
	MSM_SMEM_INFO = 1U << 1,
};

static int msm_smem_debug_mask = MSM_SMEM_INFO;
module_param_named(debug_mask, msm_smem_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
static void *smem_ipc_log_ctx;
#define NUM_LOG_PAGES 4

#define IPC_LOG(x...) do { \
		if (smem_ipc_log_ctx) \
			ipc_log_string(smem_ipc_log_ctx, x); \
	} while (0)


#define LOG_ERR(x...) do { \
		pr_err(x); \
		IPC_LOG(x); \
	} while (0)
#define SMEM_DBG(x...) do { \
		if (msm_smem_debug_mask & MSM_SMEM_DEBUG) \
			IPC_LOG(x); \
	} while (0)
#define SMEM_INFO(x...) do { \
		if (msm_smem_debug_mask & MSM_SMEM_INFO) \
			IPC_LOG(x); \
	} while (0)

#define SMEM_SPINLOCK_SMEM_ALLOC "S:3"

static remote_spinlock_t remote_spinlock;
static uint32_t num_smem_areas;
static struct smem_area *smem_areas;
static struct ramdump_segment *smem_ramdump_segments;
static int spinlocks_initialized;
static void *smem_ramdump_dev;
static DEFINE_MUTEX(spinlock_init_lock);
static DEFINE_SPINLOCK(smem_init_check_lock);
static int smem_module_inited;
static RAW_NOTIFIER_HEAD(smem_module_init_notifier_list);
static DEFINE_MUTEX(smem_module_init_notifier_lock);

/* smem security feature components */
#define SMEM_TOC_IDENTIFIER 0x434f5424 /* "$TOC" */
#define SMEM_TOC_MAX_EXCLUSIONS 4
#define SMEM_PART_HDR_IDENTIFIER 0x54525024 /* "$PRT" */
#define SMEM_ALLOCATION_CANARY 0xa5a5

struct smem_toc_entry {
	uint32_t offset;
	uint32_t size;
	uint32_t flags;
	uint16_t host0;
	uint16_t host1;
	uint32_t size_cacheline;
	uint32_t reserved[3];
	uint32_t exclusion_sizes[SMEM_TOC_MAX_EXCLUSIONS];
};

struct smem_toc {
	/* Identifier is a constant, set to SMEM_TOC_IDENTIFIER. */
	uint32_t identifier;
	uint32_t version;
	uint32_t num_entries;
	uint32_t reserved[5];
	struct smem_toc_entry entry[];
};

struct smem_partition_header {
	/* Identifier is a constant, set to SMEM_PART_HDR_IDENTIFIER. */
	uint32_t identifier;
	uint16_t host0;
	uint16_t host1;
	uint32_t size;
	uint32_t offset_free_uncached;
	uint32_t offset_free_cached;
	uint32_t reserved[3];
};

struct smem_partition_allocation_header {
	/* Canary is a constant, set to SMEM_ALLOCATION_CANARY */
	uint16_t canary;
	uint16_t smem_type;
	uint32_t size; /* includes padding bytes */
	uint16_t padding_data;
	uint16_t padding_hdr;
	uint32_t reserved[1];
};

struct smem_partition_info {
	uint32_t partition_num;
	uint32_t offset;
	uint32_t size_cacheline;
};

static struct smem_partition_info partitions[NUM_SMEM_SUBSYSTEMS];
/* end smem security feature components */

struct restart_notifier_block {
	unsigned processor;
	char *name;
	struct notifier_block nb;
};

static int restart_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);

static struct restart_notifier_block restart_notifiers[] = {
	{SMEM_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
	{SMEM_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
	{SMEM_WCNSS, "wcnss", .nb.notifier_call = restart_notifier_cb},
	{SMEM_DSPS, "dsps", .nb.notifier_call = restart_notifier_cb},
	{SMEM_MODEM, "gss", .nb.notifier_call = restart_notifier_cb},
	{SMEM_Q6, "adsp", .nb.notifier_call = restart_notifier_cb},
};

static int init_smem_remote_spinlock(void);

/**
 * smem_phys_to_virt() - Convert a physical base and offset to virtual address
 *
 * @base: physical base address to check
 * @offset: offset from the base to get the final address
 * @returns: virtual SMEM address; NULL for failure
 *
 * Takes a physical address and an offset and checks if the resulting physical
 * address would fit into one of the smem regions. If so, returns the
 * corresponding virtual address. Otherwise returns NULL.
 */
static void *smem_phys_to_virt(phys_addr_t base, unsigned offset)
{
	int i;
	phys_addr_t phys_addr;
	resource_size_t size;

	if (OVERFLOW_ADD_UNSIGNED(phys_addr_t, base, offset))
		return NULL;

	if (!smem_areas) {
		/*
		 * Early boot - no area configuration yet, so default
		 * to using the main memory region.
		 *
		 * To remove the MSM_SHARED_RAM_BASE and the static
		 * mapping of SMEM in the future, add dump_stack()
		 * to identify the early callers of smem_get_entry()
		 * (which calls this function) and replace those calls
		 * with a new function that knows how to lookup the
		 * SMEM base address before SMEM has been probed.
		 */
		phys_addr = msm_shared_ram_phys;
		size = MSM_SHARED_RAM_SIZE;

		if (base >= phys_addr && base + offset < phys_addr + size) {
			if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
				(uintptr_t)MSM_SHARED_RAM_BASE, offset)) {
				SMEM_INFO("%s: overflow %p %x\n", __func__,
					MSM_SHARED_RAM_BASE, offset);
				return NULL;
			}

			return MSM_SHARED_RAM_BASE + offset;
		} else {
			return NULL;
		}
	}
	for (i = 0; i < num_smem_areas; ++i) {
		phys_addr = smem_areas[i].phys_addr;
		size = smem_areas[i].size;

		if (base < phys_addr || base + offset >= phys_addr + size)
			continue;

		if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
			(uintptr_t)smem_areas[i].virt_addr, offset)) {
			SMEM_INFO("%s: overflow %p %x\n", __func__,
				smem_areas[i].virt_addr, offset);
			return NULL;
		}

		return smem_areas[i].virt_addr + offset;
	}

	return NULL;
}

/**
 * smem_virt_to_phys() - Convert SMEM address to physical address.
 *
 * @smem_address: Address of SMEM item (returned by smem_alloc(), etc)
 * @returns: Physical address (or NULL if there is a failure)
 *
 * This function should only be used if an SMEM item needs to be handed
 * off to a DMA engine.
 */
phys_addr_t smem_virt_to_phys(void *smem_address)
{
	phys_addr_t phys_addr = 0;
	int i;
	void *vend;

	if (!smem_areas)
		return phys_addr;

	for (i = 0; i < num_smem_areas; ++i) {
		vend = (void *)(smem_areas[i].virt_addr + smem_areas[i].size);

		if (smem_address >= smem_areas[i].virt_addr &&
				smem_address < vend) {
			phys_addr = smem_address - smem_areas[i].virt_addr;
			phys_addr += smem_areas[i].phys_addr;
			break;
		}
	}

	return phys_addr;
}
EXPORT_SYMBOL(smem_virt_to_phys);
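
/*
 * Example (illustrative sketch only, not part of the driver): a client
 * that needs to hand an SMEM item to a DMA engine would typically look
 * the item up and then convert its virtual address with
 * smem_virt_to_phys(). The item ID and the start_dma_transfer() helper
 * below are hypothetical.
 *
 *	unsigned size;
 *	void *buf = smem_get_entry(SMEM_ID_VENDOR0, &size);
 *	phys_addr_t pa;
 *
 *	if (buf) {
 *		pa = smem_virt_to_phys(buf);
 *		if (pa)
 *			start_dma_transfer(pa, size);
 *	}
 */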

/* smem_alloc returns the pointer to smem item if it is already allocated.
 * Otherwise, it returns NULL.
 */
void *smem_alloc(unsigned id, unsigned size)
{
	return smem_find(id, size);
}
EXPORT_SYMBOL(smem_alloc);

/**
 * smem_alloc_to_proc - Find existing item with security support
 *
 * @id: ID of SMEM item
 * @size: Size of the SMEM item
 * @to_proc: SMEM host that shares the item with apps
 * @flags: Item attribute flags
 * @returns: Pointer to SMEM item or NULL if it doesn't exist
 */
void *smem_alloc_to_proc(unsigned id, unsigned size, unsigned to_proc,
							unsigned flags)
{
	return smem_find_to_proc(id, size, to_proc, flags);
}
EXPORT_SYMBOL(smem_alloc_to_proc);

/**
 * __smem_get_entry - Get pointer and size of existing SMEM item
 *
 * @id: ID of SMEM item
 * @size: Pointer to size variable for storing the result
 * @skip_init_check: True means do not verify that SMEM has been initialized
 * @use_rspinlock: True to use the remote spinlock
 * @returns: Pointer to SMEM item or NULL if it doesn't exist
 */
static void *__smem_get_entry(unsigned id, unsigned *size,
		bool skip_init_check, bool use_rspinlock)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	struct smem_heap_entry *toc = shared->heap_toc;
	int use_spinlocks = spinlocks_initialized && use_rspinlock;
	void *ret = 0;
	unsigned long flags = 0;

	if (!skip_init_check && !smem_initialized_check())
		return ret;

	if (id >= SMEM_NUM_ITEMS)
		return ret;

	if (use_spinlocks)
		remote_spin_lock_irqsave(&remote_spinlock, flags);
	/* toc is in device memory and cannot be speculatively accessed */
	if (toc[id].allocated) {
		phys_addr_t phys_base;

		*size = toc[id].size;
		barrier();

		phys_base = toc[id].reserved & BASE_ADDR_MASK;
		if (!phys_base)
			phys_base = (phys_addr_t)msm_shared_ram_phys;
		ret = smem_phys_to_virt(phys_base, toc[id].offset);
	} else {
		*size = 0;
	}
	if (use_spinlocks)
		remote_spin_unlock_irqrestore(&remote_spinlock, flags);

	return ret;
}

/**
 * __smem_get_entry_to_proc - Get pointer and size of existing SMEM item with
 *		security support
 *
 * @id: ID of SMEM item
 * @size: Pointer to size variable for storing the result
 * @to_proc: SMEM host that shares the item with apps
 * @flags: Item attribute flags
 * @skip_init_check: True means do not verify that SMEM has been initialized
 * @use_rspinlock: True to use the remote spinlock
 * @returns: Pointer to SMEM item or NULL if it doesn't exist
 */
static void *__smem_get_entry_to_proc(unsigned id,
					unsigned *size,
					unsigned to_proc,
					unsigned flags,
					bool skip_init_check,
					bool use_rspinlock)
{
	struct smem_partition_header *hdr;
	unsigned long lflags = 0;
	void *item = NULL;
	struct smem_partition_allocation_header *alloc_hdr;
	uint32_t partition_num;
	uint32_t a_hdr_size;
	int rc;

	SMEM_DBG("%s(%u, %u, %u, %u, %d, %d)\n", __func__, id, *size, to_proc,
					flags, skip_init_check, use_rspinlock);

	if (!skip_init_check && !smem_initialized_check())
		return NULL;

	if (id >= SMEM_NUM_ITEMS) {
		SMEM_INFO("%s: invalid id %d\n", __func__, id);
		return NULL;
	}

	if (!(flags & SMEM_ANY_HOST_FLAG) && to_proc >= NUM_SMEM_SUBSYSTEMS) {
		SMEM_INFO("%s: id %u invalid to_proc %d\n", __func__, id,
								to_proc);
		return NULL;
	}

	if (flags & SMEM_ANY_HOST_FLAG || !partitions[to_proc].offset)
		return __smem_get_entry(id, size, skip_init_check,
								use_rspinlock);

	partition_num = partitions[to_proc].partition_num;
	hdr = smem_areas[0].virt_addr + partitions[to_proc].offset;
	if (unlikely(!spinlocks_initialized)) {
		rc = init_smem_remote_spinlock();
		if (unlikely(rc)) {
			SMEM_INFO(
				"%s: id:%u remote spinlock init failed %d\n",
							__func__, id, rc);
			return NULL;
		}
	}
	if (use_rspinlock)
		remote_spin_lock_irqsave(&remote_spinlock, lflags);
	if (hdr->identifier != SMEM_PART_HDR_IDENTIFIER) {
		LOG_ERR(
			"%s: SMEM corruption detected. Partition %d to %d at %p\n",
							__func__,
							partition_num,
							to_proc,
							hdr);
		BUG();
	}

	if (flags & SMEM_ITEM_CACHED_FLAG) {
		a_hdr_size = ALIGN(sizeof(*alloc_hdr),
				partitions[to_proc].size_cacheline);
		for (alloc_hdr = (void *)(hdr) + hdr->size - a_hdr_size;
				(void *)(alloc_hdr) > (void *)(hdr) +
					hdr->offset_free_cached;
				alloc_hdr = (void *)(alloc_hdr) -
					alloc_hdr->size - a_hdr_size) {
			if (alloc_hdr->canary != SMEM_ALLOCATION_CANARY) {
				LOG_ERR(
					"%s: SMEM corruption detected. Partition %d to %d at %p\n",
							__func__,
							partition_num,
							to_proc,
							alloc_hdr);
				BUG();

			}
			if (alloc_hdr->smem_type == id) {
				/* 8 byte alignment to match legacy */
				*size = ALIGN(alloc_hdr->size -
						alloc_hdr->padding_data, 8);
				item = (void *)(alloc_hdr) - alloc_hdr->size;
				break;
			}
		}
	} else {
		for (alloc_hdr = (void *)(hdr) + sizeof(*hdr);
				(void *)(alloc_hdr) < (void *)(hdr) +
					hdr->offset_free_uncached;
				alloc_hdr = (void *)(alloc_hdr) +
					sizeof(*alloc_hdr) +
					alloc_hdr->padding_hdr +
					alloc_hdr->size) {
			if (alloc_hdr->canary != SMEM_ALLOCATION_CANARY) {
				LOG_ERR(
					"%s: SMEM corruption detected. Partition %d to %d at %p\n",
							__func__,
							partition_num,
							to_proc,
							alloc_hdr);
				BUG();

			}
			if (alloc_hdr->smem_type == id) {
				/* 8 byte alignment to match legacy */
				*size = ALIGN(alloc_hdr->size -
						alloc_hdr->padding_data, 8);
				item = (void *)(alloc_hdr) +
					sizeof(*alloc_hdr) +
					alloc_hdr->padding_hdr;
				break;
			}
		}
	}
	if (use_rspinlock)
		remote_spin_unlock_irqrestore(&remote_spinlock, lflags);

	return item;
}

static void *__smem_find(unsigned id, unsigned size_in, bool skip_init_check)
{
	unsigned size;
	void *ptr;

	ptr = __smem_get_entry(id, &size, skip_init_check, true);
	if (!ptr)
		return 0;

	size_in = ALIGN(size_in, 8);
	if (size_in != size) {
		SMEM_INFO("smem_find(%u, %u): wrong size %u\n",
			id, size_in, size);
		return 0;
	}

	return ptr;
}

void *smem_find(unsigned id, unsigned size_in)
{
	return __smem_find(id, size_in, false);
}
EXPORT_SYMBOL(smem_find);

/**
 * smem_find_to_proc - Find existing item with security support
 *
 * @id: ID of SMEM item
 * @size_in: Size of the SMEM item
 * @to_proc: SMEM host that shares the item with apps
 * @flags: Item attribute flags
 * @returns: Pointer to SMEM item or NULL if it doesn't exist
 */
void *smem_find_to_proc(unsigned id, unsigned size_in, unsigned to_proc,
							unsigned flags)
{
	unsigned size;
	void *ptr;

	SMEM_DBG("%s(%u, %u, %u, %u)\n", __func__, id, size_in, to_proc,
									flags);

	ptr = smem_get_entry_to_proc(id, &size, to_proc, flags);
	if (!ptr)
		return 0;

	size_in = ALIGN(size_in, 8);
	if (size_in != size) {
		SMEM_INFO("smem_find_to_proc(%u, %u, %u, %u): wrong size %u\n",
			id, size_in, to_proc, flags, size);
		return 0;
	}

	return ptr;
}
EXPORT_SYMBOL(smem_find_to_proc);
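
/*
 * Example (illustrative sketch only): looking up an item that lives in
 * the partition shared between apps and the modem. The item ID and the
 * struct below are hypothetical; a size mismatch or a not-yet-allocated
 * item both return NULL.
 *
 *	struct my_shared_state *state;
 *
 *	state = smem_find_to_proc(SMEM_ID_VENDOR1, sizeof(*state),
 *				  SMEM_MODEM, 0);
 *	if (!state)
 *		return -ENODEV;
 */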

/**
 * alloc_item_nonsecure - Allocate an SMEM item in the nonsecure partition
 *
 * @id: ID of SMEM item
 * @size_in: Size to allocate
 * @returns: Pointer to SMEM item or NULL for error
 *
 * Assumes the id parameter is valid and does not already exist. Assumes
 * size_in is already adjusted for alignment, if necessary. Requires the
 * remote spinlock to already be locked.
 */
static void *alloc_item_nonsecure(unsigned id, unsigned size_in)
{
	void *smem_base = MSM_SHARED_RAM_BASE;
	struct smem_shared *shared = smem_base;
	struct smem_heap_entry *toc = shared->heap_toc;
	void *ret = NULL;

	if (shared->heap_info.heap_remaining >= size_in) {
		toc[id].offset = shared->heap_info.free_offset;
		toc[id].size = size_in;
		/*
		 * wmb() is necessary to ensure the allocation data is
		 * consistent before setting the allocated flag to prevent race
		 * conditions with remote processors
		 */
		wmb();
		toc[id].allocated = 1;

		shared->heap_info.free_offset += size_in;
		shared->heap_info.heap_remaining -= size_in;
		ret = smem_base + toc[id].offset;
		/*
		 * wmb() is necessary to ensure the heap data is consistent
		 * before continuing to prevent race conditions with remote
		 * processors
		 */
		wmb();
	} else {
		SMEM_INFO("%s: id %u not enough memory %u (required %u)\n",
			__func__, id, shared->heap_info.heap_remaining,
			size_in);
	}

	return ret;
}

/**
 * alloc_item_secure - Allocate an SMEM item in a secure partition
 *
 * @id: ID of SMEM item
 * @size_in: Size to allocate
 * @to_proc: SMEM host that shares the item with apps
 * @flags: Item attribute flags
 * @returns: Pointer to SMEM item or NULL for error
 *
 * Assumes the id parameter is valid and does not already exist. Assumes
 * size_in is the raw size requested by the client. Assumes to_proc is a valid
 * host, and a valid partition to that host exists. Requires the remote
 * spinlock to already be locked.
 */
static void *alloc_item_secure(unsigned id, unsigned size_in, unsigned to_proc,
								unsigned flags)
{
	void *smem_base = MSM_SHARED_RAM_BASE;
	struct smem_partition_header *hdr;
	struct smem_partition_allocation_header *alloc_hdr;
	uint32_t a_hdr_size;
	uint32_t a_data_size;
	uint32_t size_cacheline;
	uint32_t free_space;
	uint32_t partition_num;
	void *ret = NULL;

	hdr = smem_base + partitions[to_proc].offset;
	partition_num = partitions[to_proc].partition_num;

	if (hdr->identifier != SMEM_PART_HDR_IDENTIFIER) {
		LOG_ERR(
			"%s: SMEM corruption detected. Partition %d to %d at %p\n",
							__func__,
							partition_num,
							to_proc,
							hdr);
		BUG();
	}

	size_cacheline = partitions[to_proc].size_cacheline;
	free_space = hdr->offset_free_cached -
					hdr->offset_free_uncached;

	if (flags & SMEM_ITEM_CACHED_FLAG) {
		a_hdr_size = ALIGN(sizeof(*alloc_hdr), size_cacheline);
		a_data_size = ALIGN(size_in, size_cacheline);
		if (free_space < a_hdr_size + a_data_size) {
			SMEM_INFO(
				"%s: id %u not enough memory %u (required %u)\n",
						__func__, id, free_space,
						a_hdr_size + a_data_size);
			return ret;
		}
		alloc_hdr = (void *)(hdr) + hdr->offset_free_cached -
								a_hdr_size;
		alloc_hdr->canary = SMEM_ALLOCATION_CANARY;
		alloc_hdr->smem_type = id;
		alloc_hdr->size = a_data_size;
		alloc_hdr->padding_data = a_data_size - size_in;
		alloc_hdr->padding_hdr = a_hdr_size - sizeof(*alloc_hdr);
		hdr->offset_free_cached = hdr->offset_free_cached -
						a_hdr_size - a_data_size;
		ret = (void *)(alloc_hdr) - a_data_size;
		/*
		 * The SMEM protocol currently does not support cacheable
		 * areas within the smem region, but if it ever does in the
		 * future, then cache management needs to be done here.
		 * The area of memory this item is allocated from will need to
		 * be dynamically made cacheable, and a cache flush of the
		 * allocation header using __cpuc_flush_dcache_area and
		 * outer_flush_area will need to be done.
		 */
	} else {
		a_hdr_size = sizeof(*alloc_hdr);
		a_data_size = ALIGN(size_in, 8);
		if (free_space < a_hdr_size + a_data_size) {
			SMEM_INFO(
				"%s: id %u not enough memory %u (required %u)\n",
						__func__, id, free_space,
						a_hdr_size + a_data_size);
			return ret;
		}
		alloc_hdr = (void *)(hdr) + hdr->offset_free_uncached;
		alloc_hdr->canary = SMEM_ALLOCATION_CANARY;
		alloc_hdr->smem_type = id;
		alloc_hdr->size = a_data_size;
		alloc_hdr->padding_data = a_data_size - size_in;
		alloc_hdr->padding_hdr = a_hdr_size - sizeof(*alloc_hdr);
		hdr->offset_free_uncached = hdr->offset_free_uncached +
						a_hdr_size + a_data_size;
		ret = alloc_hdr + 1;
	}
	/*
	 * wmb() is necessary to ensure the heap and allocation data is
	 * consistent before continuing to prevent race conditions with remote
	 * processors
	 */
	wmb();

	return ret;
}

/* smem_alloc2 returns the pointer to smem item. If it is not allocated,
 * it allocates it and then returns the pointer to it.
 */
void *smem_alloc2(unsigned id, unsigned size_in)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	struct smem_heap_entry *toc = shared->heap_toc;
	unsigned long flags;
	void *ret = NULL;
	int rc;

	if (!smem_initialized_check())
		return NULL;

	if (id >= SMEM_NUM_ITEMS) {
		SMEM_INFO("%s: invalid id %u\n", __func__, id);
		return NULL;
	}

	if (unlikely(!spinlocks_initialized)) {
		rc = init_smem_remote_spinlock();
		if (unlikely(rc)) {
			SMEM_INFO("%s: remote spinlock init failed %d\n",
								__func__, rc);
			return NULL;
		}
	}

	size_in = ALIGN(size_in, 8);
	remote_spin_lock_irqsave(&remote_spinlock, flags);
	if (toc[id].allocated) {
		SMEM_INFO("%s: %u already allocated\n", __func__, id);
		if (size_in != toc[id].size)
			SMEM_INFO("%s: wrong size %u (expected %u)\n",
				__func__, toc[id].size, size_in);
		else
			ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
	} else if (id > SMEM_FIXED_ITEM_LAST) {
		SMEM_INFO("%s: allocating %u\n", __func__, id);
		ret = alloc_item_nonsecure(id, size_in);
	}
	remote_spin_unlock_irqrestore(&remote_spinlock, flags);
	return ret;
}
EXPORT_SYMBOL(smem_alloc2);

/**
 * smem_alloc2_to_proc - Find an existing item, otherwise allocate it with
 *		security support
 *
 * @id: ID of SMEM item
 * @size_in: Size of the SMEM item
 * @to_proc: SMEM host that shares the item with apps
 * @flags: Item attribute flags
 * @returns: Pointer to SMEM item or NULL if it couldn't be found/allocated
 */
void *smem_alloc2_to_proc(unsigned id, unsigned size_in, unsigned to_proc,
							unsigned flags)
{
	unsigned long lflags;
	void *ret = NULL;
	int rc;
	unsigned size_out;
	unsigned a_size_in;

	SMEM_DBG("%s(%u, %u, %u, %u)\n", __func__, id, size_in, to_proc,
									flags);

	if (!smem_initialized_check())
		return NULL;

	if (id >= SMEM_NUM_ITEMS) {
		SMEM_INFO("%s: invalid id %u\n", __func__, id);
		return NULL;
	}

	if (!(flags & SMEM_ANY_HOST_FLAG) && to_proc >= NUM_SMEM_SUBSYSTEMS) {
		SMEM_INFO("%s: invalid to_proc %u for id %u\n", __func__,
								to_proc, id);
		return NULL;
	}

	if (unlikely(!spinlocks_initialized)) {
		rc = init_smem_remote_spinlock();
		if (unlikely(rc)) {
			SMEM_INFO("%s: id:%u remote spinlock init failed %d\n",
							__func__, id, rc);
			return NULL;
		}
	}

	a_size_in = ALIGN(size_in, 8);
	remote_spin_lock_irqsave(&remote_spinlock, lflags);

	ret = __smem_get_entry_to_proc(id, &size_out, to_proc, flags, true,
									false);
	if (ret) {
		SMEM_INFO("%s: %u already allocated\n", __func__, id);
		if (a_size_in == size_out) {
			remote_spin_unlock_irqrestore(&remote_spinlock, lflags);
			return ret;
		} else {
			remote_spin_unlock_irqrestore(&remote_spinlock, lflags);
			SMEM_INFO("%s: id %u wrong size %u (expected %u)\n",
				__func__, id, size_out, a_size_in);
			return NULL;
		}
	}

	if (id > SMEM_FIXED_ITEM_LAST) {
		SMEM_INFO("%s: allocating %u size %u to_proc %u flags %u\n",
					__func__, id, size_in, to_proc, flags);
		if (flags & SMEM_ANY_HOST_FLAG || !partitions[to_proc].offset)
			ret = alloc_item_nonsecure(id, a_size_in);
		else
			ret = alloc_item_secure(id, size_in, to_proc, flags);

	} else {
		SMEM_INFO("%s: attempted to allocate non-dynamic item %u\n",
								__func__, id);
	}

	remote_spin_unlock_irqrestore(&remote_spinlock, lflags);
	return ret;
}
EXPORT_SYMBOL(smem_alloc2_to_proc);
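
/*
 * Example (illustrative sketch only): a transport driver that owns an item
 * in the apps<->ADSP partition would typically find-or-allocate it once at
 * probe time. The item ID and the struct below are hypothetical.
 *
 *	struct my_fifo *fifo;
 *
 *	fifo = smem_alloc2_to_proc(SMEM_ID_VENDOR2, sizeof(*fifo),
 *				   SMEM_Q6, 0);
 *	if (!fifo)
 *		return -ENOMEM;
 */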

void *smem_get_entry(unsigned id, unsigned *size)
{
	return __smem_get_entry(id, size, false, true);
}
EXPORT_SYMBOL(smem_get_entry);

/**
 * smem_get_entry_to_proc - Get existing item with security support
 *
 * @id: ID of SMEM item
 * @size: Pointer to size variable for storing the result
 * @to_proc: SMEM host that shares the item with apps
 * @flags: Item attribute flags
 * @returns: Pointer to SMEM item or NULL if it doesn't exist
 */
void *smem_get_entry_to_proc(unsigned id, unsigned *size, unsigned to_proc,
							unsigned flags)
{
	SMEM_DBG("%s(%u, %u, %u, %u)\n", __func__, id, *size, to_proc, flags);

	return __smem_get_entry_to_proc(id, size, to_proc, flags, false, true);
}
EXPORT_SYMBOL(smem_get_entry_to_proc);

/**
 * smem_get_entry_no_rlock - Get existing item without using remote spinlock
 *
 * @id: ID of SMEM item
 * @size_out: Pointer to size variable for storing the result
 * @returns: Pointer to SMEM item or NULL if it doesn't exist
 *
 * This function does not lock the remote spinlock and should only be used in
 * failure-recovery cases such as retrieving the subsystem failure reason
 * during
 * subsystem restart.
 */
void *smem_get_entry_no_rlock(unsigned id, unsigned *size_out)
{
	return __smem_get_entry(id, size_out, false, false);
}
EXPORT_SYMBOL(smem_get_entry_no_rlock);

/**
 * smem_get_remote_spinlock - Remote spinlock pointer for unit testing.
 *
 * @returns: pointer to SMEM remote spinlock
 */
remote_spinlock_t *smem_get_remote_spinlock(void)
{
	if (unlikely(!spinlocks_initialized))
		init_smem_remote_spinlock();
	return &remote_spinlock;
}
EXPORT_SYMBOL(smem_get_remote_spinlock);

/**
 * smem_get_free_space() - Get the available allocation free space for a
 *		partition
 *
 * @to_proc: remote SMEM host. Determines the applicable partition
 * @returns: size in bytes available to allocate
 *
 * Helper function for SMD so that SMD only scans the channel allocation
 * table for a partition when it is reasonably certain that a channel has
 * actually been created, because scanning can be expensive. Creating a channel
 * will consume some of the free space in a partition, so SMD can compare the
 * last free space size against the current free space size to determine if
 * a channel may have been created. SMD can't do this directly, because the
 * necessary partition internals are restricted to just SMEM.
 */
unsigned smem_get_free_space(unsigned to_proc)
{
	struct smem_partition_header *hdr;
	struct smem_shared *shared;

	if (to_proc >= NUM_SMEM_SUBSYSTEMS) {
		pr_err("%s: invalid to_proc:%d\n", __func__, to_proc);
		return UINT_MAX;
	}

	if (partitions[to_proc].offset) {
		if (unlikely(OVERFLOW_ADD_UNSIGNED(uintptr_t,
					(uintptr_t)smem_areas[0].virt_addr,
					partitions[to_proc].offset))) {
			pr_err("%s: unexpected overflow detected\n", __func__);
			return UINT_MAX;
		}
		hdr = smem_areas[0].virt_addr + partitions[to_proc].offset;
		return hdr->offset_free_cached - hdr->offset_free_uncached;
	} else {
		shared = (void *)MSM_SHARED_RAM_BASE;
		return shared->heap_info.heap_remaining;
	}
}
EXPORT_SYMBOL(smem_get_free_space);
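
/*
 * Example (illustrative sketch only): an SMD-style client can cache the
 * last observed free-space value and only rescan its allocation table when
 * the value shrinks, which indicates something may have been allocated in
 * the partition since the last check. The rescan helper below is
 * hypothetical.
 *
 *	static unsigned last_free;
 *	unsigned free_now = smem_get_free_space(SMEM_MODEM);
 *
 *	if (free_now != UINT_MAX && free_now < last_free)
 *		rescan_channel_alloc_table();
 *	last_free = free_now;
 */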

/**
 * init_smem_remote_spinlock - Reentrant remote spinlock initialization
 *
 * @returns: success or error code for failure
 */
static int init_smem_remote_spinlock(void)
{
	int rc = 0;

	/*
	 * Optimistic locking. Init only needs to be done once by the first
	 * caller. After that, serializing inits between different callers
	 * is unnecessary. The second check after the lock ensures init
	 * wasn't previously completed by someone else before the lock could
	 * be grabbed.
	 */
	if (!spinlocks_initialized) {
		mutex_lock(&spinlock_init_lock);
		if (!spinlocks_initialized) {
			rc = remote_spin_lock_init(&remote_spinlock,
						SMEM_SPINLOCK_SMEM_ALLOC);
			if (!rc)
				spinlocks_initialized = 1;
		}
		mutex_unlock(&spinlock_init_lock);
	}
	return rc;
}

/**
 * smem_initialized_check - Reentrant check that smem has been initialized
 *
 * @returns: true if initialized, false if not.
 */
bool smem_initialized_check(void)
{
	static int checked;
	static int is_inited;
	unsigned long flags;
	struct smem_shared *smem;
	int *version_array;

	if (likely(checked)) {
		if (unlikely(!is_inited))
			LOG_ERR("%s: smem not initialized\n", __func__);
		return is_inited;
	}

	spin_lock_irqsave(&smem_init_check_lock, flags);
	if (checked) {
		spin_unlock_irqrestore(&smem_init_check_lock, flags);
		if (unlikely(!is_inited))
			LOG_ERR("%s: smem not initialized\n", __func__);
		return is_inited;
	}

	smem = (void *)MSM_SHARED_RAM_BASE;

	if (smem->heap_info.initialized != 1)
		goto failed;
	if (smem->heap_info.reserved != 0)
		goto failed;

	version_array = __smem_find(SMEM_VERSION_INFO, SMEM_VERSION_INFO_SIZE,
									true);
	if (version_array == NULL)
		goto failed;

	/*
	 * The Modem SBL is now the Master SBL version and is required to
	 * pre-initialize SMEM and fill in any necessary configuration
	 * structures. Without the extra configuration data, the SMEM driver
	 * cannot be properly initialized.
	 */
	if (version_array[MODEM_SBL_VERSION_INDEX] != SMEM_VERSION << 16) {
		pr_err("%s: SBL version not correct\n", __func__);
		goto failed;
	}

	is_inited = 1;
	checked = 1;
	spin_unlock_irqrestore(&smem_init_check_lock, flags);
	return is_inited;

failed:
	is_inited = 0;
	checked = 1;
	spin_unlock_irqrestore(&smem_init_check_lock, flags);
	LOG_ERR(
		"%s: shared memory needs to be initialized by SBL before booting\n",
								__func__);
	return is_inited;
}
EXPORT_SYMBOL(smem_initialized_check);

static int restart_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data)
{
	if (code == SUBSYS_AFTER_SHUTDOWN) {
		struct restart_notifier_block *notifier;

		notifier = container_of(this,
					struct restart_notifier_block, nb);
		SMEM_INFO("%s: ssrestart for processor %d ('%s')\n",
				__func__, notifier->processor,
				notifier->name);

		remote_spin_release(&remote_spinlock, notifier->processor);
		remote_spin_release_all(notifier->processor);

		if (smem_ramdump_dev) {
			int ret;

			SMEM_DBG("%s: saving ramdump\n", __func__);
			/*
			 * XPU protection does not currently allow the
			 * auxiliary memory regions to be dumped. If this
			 * changes, then num_smem_areas + 1 should be passed
			 * into do_elf_ramdump() to dump all regions.
			 */
			ret = do_elf_ramdump(smem_ramdump_dev,
					smem_ramdump_segments, 1);
			if (ret < 0)
				LOG_ERR("%s: unable to dump smem %d\n",
								__func__, ret);
		}
	}

	return NOTIFY_DONE;
}

static __init int modem_restart_late_init(void)
{
	int i;
	void *handle;
	struct restart_notifier_block *nb;

	smem_ramdump_dev = create_ramdump_device("smem", NULL);
	if (IS_ERR_OR_NULL(smem_ramdump_dev)) {
		LOG_ERR("%s: Unable to create smem ramdump device.\n",
			__func__);
		smem_ramdump_dev = NULL;
	}

	for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
		nb = &restart_notifiers[i];
		handle = subsys_notif_register_notifier(nb->name, &nb->nb);
		SMEM_DBG("%s: registering notif for '%s', handle=%p\n",
				__func__, nb->name, handle);
	}

	return 0;
}
late_initcall(modem_restart_late_init);

int smem_module_init_notifier_register(struct notifier_block *nb)
{
	int ret;
	if (!nb)
		return -EINVAL;
	mutex_lock(&smem_module_init_notifier_lock);
	ret = raw_notifier_chain_register(&smem_module_init_notifier_list, nb);
	if (smem_module_inited)
		nb->notifier_call(nb, 0, NULL);
	mutex_unlock(&smem_module_init_notifier_lock);
	return ret;
}
EXPORT_SYMBOL(smem_module_init_notifier_register);

int smem_module_init_notifier_unregister(struct notifier_block *nb)
{
	int ret;
	if (!nb)
		return -EINVAL;
	mutex_lock(&smem_module_init_notifier_lock);
	ret = raw_notifier_chain_unregister(&smem_module_init_notifier_list,
					nb);
	mutex_unlock(&smem_module_init_notifier_lock);
	return ret;
}
EXPORT_SYMBOL(smem_module_init_notifier_unregister);
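
/*
 * Example (illustrative sketch only): a dependent driver can defer its own
 * setup until SMEM is ready by registering for the module-init
 * notification. The callback and notifier block below are hypothetical.
 *
 *	static int my_smem_ready_cb(struct notifier_block *nb,
 *				    unsigned long action, void *data)
 *	{
 *		// safe to call smem_alloc2_to_proc() and friends from here
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_smem_ready_cb,
 *	};
 *
 *	smem_module_init_notifier_register(&my_nb);
 */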

static void smem_module_init_notify(uint32_t state, void *data)
{
	mutex_lock(&smem_module_init_notifier_lock);
	smem_module_inited = 1;
	raw_notifier_call_chain(&smem_module_init_notifier_list,
					state, data);
	mutex_unlock(&smem_module_init_notifier_lock);
}

/**
 * smem_init_security_partition - Init local structures for a secured smem
 *		partition that has apps as one of the hosts
 *
 * @entry: Entry in the security TOC for the partition to init
 * @num: Partition ID
 *
 * Initialize local data structures to point to a secured smem partition
 * that is accessible by apps and another processor. Assumes that one of the
 * listed hosts is apps. Verifies that the partition is valid, otherwise will
 * skip. Checks for memory corruption and will BUG() if detected. Assumes
 * smem_areas is already initialized and that smem_areas[0] corresponds to the
 * smem region with the secured partitions.
 */
static void smem_init_security_partition(struct smem_toc_entry *entry,
								uint32_t num)
{
	uint16_t remote_host;
	struct smem_partition_header *hdr;

	if (!entry->offset) {
		SMEM_INFO("Skipping smem partition %d - bad offset\n", num);
		return;
	}
	if (!entry->size) {
		SMEM_INFO("Skipping smem partition %d - bad size\n", num);
		return;
	}
	if (!entry->size_cacheline) {
		SMEM_INFO("Skipping smem partition %d - bad cacheline\n", num);
		return;
	}

	if (entry->host0 == SMEM_APPS)
		remote_host = entry->host1;
	else
		remote_host = entry->host0;

	if (remote_host >= NUM_SMEM_SUBSYSTEMS) {
		SMEM_INFO("Skipping smem partition %d - bad remote:%d\n", num,
								remote_host);
		return;
	}
	if (partitions[remote_host].offset) {
		SMEM_INFO("Skipping smem partition %d - duplicate of %d\n", num,
					partitions[remote_host].partition_num);
		return;
	}

	hdr = smem_areas[0].virt_addr + entry->offset;

	if (hdr->identifier != SMEM_PART_HDR_IDENTIFIER) {
		LOG_ERR("Smem partition %d hdr magic is bad\n", num);
		BUG();
	}
	if (!hdr->size) {
		LOG_ERR("Smem partition %d size is 0\n", num);
		BUG();
	}
	if (hdr->offset_free_uncached > hdr->size) {
		LOG_ERR("Smem partition %d uncached heap exceeds size\n", num);
		BUG();
	}
	if (hdr->offset_free_cached > hdr->size) {
		LOG_ERR("Smem partition %d cached heap exceeds size\n", num);
		BUG();
	}
	if (hdr->host0 != SMEM_APPS && hdr->host1 != SMEM_APPS) {
		LOG_ERR("Smem partition %d hosts don't match TOC\n", num);
		BUG();
	}
	if (hdr->host0 != remote_host && hdr->host1 != remote_host) {
		LOG_ERR("Smem partition %d hosts don't match TOC\n", num);
		BUG();
	}

	partitions[remote_host].partition_num = num;
	partitions[remote_host].offset = entry->offset;
	partitions[remote_host].size_cacheline = entry->size_cacheline;
	SMEM_INFO("Partition %d offset:%x remote:%d\n", num, entry->offset,
								remote_host);
}

/**
 * smem_init_security - Init local support for secured smem
 *
 * Looks for a valid security TOC, and if one is found, parses it looking for
 * partitions that apps can access. If any such partitions are found, do the
 * required local initialization to support them. Assumes smem_areas is inited
 * and smem_area[0] corresponds to the smem region with the TOC.
 */
static void smem_init_security(void)
{
	struct smem_toc *toc;
	uint32_t i;

	SMEM_DBG("%s\n", __func__);

	toc = smem_areas[0].virt_addr + smem_areas[0].size - 4 * 1024;

	if (toc->identifier != SMEM_TOC_IDENTIFIER) {
		LOG_ERR("%s failed: invalid TOC magic\n", __func__);
		return;
	}

	for (i = 0; i < toc->num_entries; ++i) {
		SMEM_DBG("Partition %d host0:%d host1:%d\n", i,
							toc->entry[i].host0,
							toc->entry[i].host1);

		if (toc->entry[i].host0 == SMEM_APPS ||
					toc->entry[i].host1 == SMEM_APPS)
			smem_init_security_partition(&toc->entry[i], i);
	}

	SMEM_DBG("%s done\n", __func__);
}

static int msm_smem_probe(struct platform_device *pdev)
{
	char *key;
	struct resource *r;
	phys_addr_t aux_mem_base;
	resource_size_t aux_mem_size;
	int temp_string_size = 11; /* max 3 digit count */
	char temp_string[temp_string_size];
	int ret;
	struct ramdump_segment *ramdump_segments_tmp = NULL;
	struct smem_area *smem_areas_tmp = NULL;
	int smem_idx = 0;
	bool security_enabled;

	if (!smem_initialized_check())
		return -ENODEV;

	key = "irq-reg-base";
	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
	if (!r) {
		LOG_ERR("%s: missing '%s'\n", __func__, key);
		return -ENODEV;
	}

	num_smem_areas = 1;
	while (1) {
		scnprintf(temp_string, temp_string_size, "aux-mem%d",
			num_smem_areas);
		r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
								temp_string);
		if (!r)
			break;

		++num_smem_areas;
		if (num_smem_areas > 999) {
			LOG_ERR("%s: max num aux mem regions reached\n",
								__func__);
			break;
		}
	}
	/* Initialize main SMEM region and SSR ramdump region */
	key = "smem";
	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
	if (!r) {
		LOG_ERR("%s: missing '%s'\n", __func__, key);
		return -ENODEV;
	}

	smem_areas_tmp = kmalloc_array(num_smem_areas, sizeof(struct smem_area),
				GFP_KERNEL);
	if (!smem_areas_tmp) {
		LOG_ERR("%s: smem areas kmalloc failed\n", __func__);
		ret = -ENOMEM;
		goto free_smem_areas;
	}

	ramdump_segments_tmp = kmalloc_array(num_smem_areas,
			sizeof(struct ramdump_segment), GFP_KERNEL);
	if (!ramdump_segments_tmp) {
		LOG_ERR("%s: ramdump segment kmalloc failed\n", __func__);
		ret = -ENOMEM;
		goto free_smem_areas;
	}
	smem_areas_tmp[smem_idx].phys_addr = r->start;
	smem_areas_tmp[smem_idx].size = resource_size(r);
	smem_areas_tmp[smem_idx].virt_addr = MSM_SHARED_RAM_BASE;

	ramdump_segments_tmp[smem_idx].address = r->start;
	ramdump_segments_tmp[smem_idx].size = resource_size(r);
	++smem_idx;

	/* Configure auxiliary SMEM regions */
	while (1) {
		scnprintf(temp_string, temp_string_size, "aux-mem%d",
								smem_idx);
		r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
							temp_string);
		if (!r)
			break;
		aux_mem_base = r->start;
		aux_mem_size = resource_size(r);

		ramdump_segments_tmp[smem_idx].address = aux_mem_base;
		ramdump_segments_tmp[smem_idx].size = aux_mem_size;

		smem_areas_tmp[smem_idx].phys_addr = aux_mem_base;
		smem_areas_tmp[smem_idx].size = aux_mem_size;
		smem_areas_tmp[smem_idx].virt_addr = ioremap_nocache(
			(unsigned long)(smem_areas_tmp[smem_idx].phys_addr),
			smem_areas_tmp[smem_idx].size);
		SMEM_DBG("%s: %s = %pa %pa -> %p", __func__, temp_string,
				&aux_mem_base, &aux_mem_size,
				smem_areas_tmp[smem_idx].virt_addr);

		if (!smem_areas_tmp[smem_idx].virt_addr) {
			LOG_ERR("%s: ioremap_nocache() of addr:%pa size: %pa\n",
				__func__,
				&smem_areas_tmp[smem_idx].phys_addr,
				&smem_areas_tmp[smem_idx].size);
			ret = -ENOMEM;
			goto free_smem_areas;
		}

		if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
				(uintptr_t)smem_areas_tmp[smem_idx].virt_addr,
				smem_areas_tmp[smem_idx].size)) {
			LOG_ERR(
				"%s: invalid virtual address block %i: %p:%pa\n",
					__func__, smem_idx,
					smem_areas_tmp[smem_idx].virt_addr,
					&smem_areas_tmp[smem_idx].size);
			++smem_idx;
			ret = -EINVAL;
			goto free_smem_areas;
		}

		++smem_idx;
		if (smem_idx > 999) {
			LOG_ERR("%s: max num aux mem regions reached\n",
								__func__);
			break;
		}
	}
	smem_areas = smem_areas_tmp;
	smem_ramdump_segments = ramdump_segments_tmp;

	key = "mpu-enabled";
	security_enabled = of_property_read_bool(pdev->dev.of_node, key);
	if (security_enabled) {
		SMEM_INFO("smem security enabled\n");
		smem_init_security();
	}

	ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
	if (ret)
		LOG_ERR("%s: of_platform_populate failed %d\n", __func__, ret);

	return 0;

free_smem_areas:
	for (smem_idx = smem_idx - 1; smem_idx >= 1; --smem_idx)
		iounmap(smem_areas_tmp[smem_idx].virt_addr);

	num_smem_areas = 0;
	kfree(ramdump_segments_tmp);
	kfree(smem_areas_tmp);
	return ret;
}

static struct of_device_id msm_smem_match_table[] = {
	{ .compatible = "qcom,smem" },
	{},
};

static struct platform_driver msm_smem_driver = {
	.probe = msm_smem_probe,
	.driver = {
		.name = "msm_smem",
		.owner = THIS_MODULE,
		.of_match_table = msm_smem_match_table,
	},
};

int __init msm_smem_init(void)
{
	static bool registered;
	int rc;

	if (registered)
		return 0;

	registered = true;

	smem_ipc_log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "smem");
	if (!smem_ipc_log_ctx) {
		pr_err("%s: unable to create logging context\n", __func__);
		msm_smem_debug_mask = 0;
	}

	rc = init_smem_remote_spinlock();
	if (rc) {
		LOG_ERR("%s: remote spinlock init failed %d\n", __func__, rc);
		return rc;
	}

	rc = platform_driver_register(&msm_smem_driver);
	if (rc) {
		LOG_ERR("%s: msm_smem_driver register failed %d\n",
							__func__, rc);
		return rc;
	}

	smem_module_init_notify(0, NULL);

	return 0;
}

module_init(msm_smem_init);