/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/export.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/ipc_logging.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <soc/qcom/subsystem_notif.h>
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/ramdump.h>

#include <soc/qcom/smem.h>


#include "smem_private.h"

#define MODEM_SBL_VERSION_INDEX 7
#define SMEM_VERSION_INFO_SIZE (32 * 4)
#define SMEM_VERSION 0x000B

enum {
	MSM_SMEM_DEBUG = 1U << 0,
	MSM_SMEM_INFO = 1U << 1,
};

static int msm_smem_debug_mask = MSM_SMEM_INFO;
module_param_named(debug_mask, msm_smem_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
static void *smem_ipc_log_ctx;
#define NUM_LOG_PAGES 4

#define IPC_LOG(x...) do { \
		if (smem_ipc_log_ctx) \
			ipc_log_string(smem_ipc_log_ctx, x); \
	} while (0)


#define LOG_ERR(x...) do { \
		pr_err(x); \
		IPC_LOG(x); \
	} while (0)
#define SMEM_DBG(x...) do { \
		if (msm_smem_debug_mask & MSM_SMEM_DEBUG) \
			IPC_LOG(x); \
	} while (0)
#define SMEM_INFO(x...) do { \
		if (msm_smem_debug_mask & MSM_SMEM_INFO) \
			IPC_LOG(x); \
	} while (0)
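
/*
 * Both log levels can be toggled at runtime through the debug_mask module
 * parameter, e.g. (assuming this file builds as the msm_smem module):
 *	echo 3 > /sys/module/msm_smem/parameters/debug_mask
 */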

#define SMEM_SPINLOCK_SMEM_ALLOC "S:3"

static void *smem_ram_base;
static resource_size_t smem_ram_size;
static phys_addr_t smem_ram_phys;
static remote_spinlock_t remote_spinlock;
static uint32_t num_smem_areas;
static struct smem_area *smem_areas;
static struct ramdump_segment *smem_ramdump_segments;
static int spinlocks_initialized;
static void *smem_ramdump_dev;
static DEFINE_MUTEX(spinlock_init_lock);
static DEFINE_SPINLOCK(smem_init_check_lock);
static int smem_module_inited;
static RAW_NOTIFIER_HEAD(smem_module_init_notifier_list);
static DEFINE_MUTEX(smem_module_init_notifier_lock);
static bool probe_done;
uint32_t smem_max_items;

/* smem security feature components */
#define SMEM_TOC_IDENTIFIER 0x434f5424 /* "$TOC" */
#define SMEM_TOC_MAX_EXCLUSIONS 4
#define SMEM_PART_HDR_IDENTIFIER 0x54525024 /* "$PRT" */
#define SMEM_ALLOCATION_CANARY 0xa5a5

struct smem_toc_entry {
	uint32_t offset;
	uint32_t size;
	uint32_t flags;
	uint16_t host0;
	uint16_t host1;
	uint32_t size_cacheline;
	uint32_t reserved[3];
	uint32_t exclusion_sizes[SMEM_TOC_MAX_EXCLUSIONS];
};

struct smem_toc {
	/* Identifier is a constant, set to SMEM_TOC_IDENTIFIER. */
	uint32_t identifier;
	uint32_t version;
	uint32_t num_entries;
	uint32_t reserved[5];
	struct smem_toc_entry entry[];
};

struct smem_partition_header {
	/* Identifier is a constant, set to SMEM_PART_HDR_IDENTIFIER. */
	uint32_t identifier;
	uint16_t host0;
	uint16_t host1;
	uint32_t size;
	uint32_t offset_free_uncached;
	uint32_t offset_free_cached;
	uint32_t reserved[3];
};

struct smem_partition_allocation_header {
	/* Canary is a constant, set to SMEM_ALLOCATION_CANARY */
	uint16_t canary;
	uint16_t smem_type;
	uint32_t size; /* includes padding bytes */
	uint16_t padding_data;
	uint16_t padding_hdr;
	uint32_t reserved[1];
};

struct smem_partition_info {
	uint32_t partition_num;
	uint32_t offset;
	uint32_t size_cacheline;
};

static struct smem_partition_info partitions[NUM_SMEM_SUBSYSTEMS];

#define SMEM_COMM_PART_VERSION 0x000C
#define SMEM_COMM_HOST 0xFFFE
static bool use_comm_partition;
static struct smem_partition_info comm_partition;
/* end smem security feature components */

/* Identifier for the SMEM target info struct. */
#define SMEM_TARG_INFO_IDENTIFIER 0x49494953 /* "SIII" in little-endian. */

struct smem_targ_info_type {
	/* Identifier is a constant, set to SMEM_TARG_INFO_IDENTIFIER. */
	uint32_t identifier;
	uint32_t size;
	phys_addr_t phys_base_addr;
	uint32_t max_items;
};

struct restart_notifier_block {
	unsigned int processor;
	char *name;
	struct notifier_block nb;
};

static int restart_notifier_cb(struct notifier_block *this,
			       unsigned long code,
			       void *data);

static struct restart_notifier_block restart_notifiers[] = {
	{SMEM_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
	{SMEM_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
	{SMEM_WCNSS, "wcnss", .nb.notifier_call = restart_notifier_cb},
	{SMEM_DSPS, "dsps", .nb.notifier_call = restart_notifier_cb},
	{SMEM_MODEM, "gss", .nb.notifier_call = restart_notifier_cb},
	{SMEM_Q6, "adsp", .nb.notifier_call = restart_notifier_cb},
	{SMEM_DSPS, "slpi", .nb.notifier_call = restart_notifier_cb},
};

static int init_smem_remote_spinlock(void);

/**
 * is_probe_done() - Did the probe function successfully complete
 *
 * @return - true if probe successfully completed, false otherwise
 *
 * Helper function for EPROBE_DEFER support. If this function returns false,
 * the calling function should immediately return -EPROBE_DEFER.
 */
static bool is_probe_done(void)
{
	return probe_done;
}

/**
 * smem_phys_to_virt() - Convert a physical base and offset to virtual address
 *
 * @base: physical base address to check
 * @offset: offset from the base to get the final address
 * @returns: virtual SMEM address; NULL for failure
 *
 * Takes a physical address and an offset and checks if the resulting physical
 * address would fit into one of the smem regions. If so, returns the
 * corresponding virtual address. Otherwise returns NULL.
 */
static void *smem_phys_to_virt(phys_addr_t base, unsigned int offset)
{
	int i;
	phys_addr_t phys_addr;
	resource_size_t size;

	if (OVERFLOW_ADD_UNSIGNED(phys_addr_t, base, offset))
		return NULL;

	if (!smem_areas) {
		/*
		 * Early boot - no area configuration yet, so default
		 * to using the main memory region.
		 *
		 * To remove the MSM_SHARED_RAM_BASE and the static
		 * mapping of SMEM in the future, add dump_stack()
		 * to identify the early callers of smem_get_entry()
		 * (which calls this function) and replace those calls
		 * with a new function that knows how to lookup the
		 * SMEM base address before SMEM has been probed.
		 */
		phys_addr = smem_ram_phys;
		size = smem_ram_size;

		if (base >= phys_addr && base + offset < phys_addr + size) {
			if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
					(uintptr_t)smem_ram_base, offset)) {
				SMEM_INFO("%s: overflow %p %x\n", __func__,
					  smem_ram_base, offset);
				return NULL;
			}

			return smem_ram_base + offset;
		} else {
			return NULL;
		}
	}
	for (i = 0; i < num_smem_areas; ++i) {
		phys_addr = smem_areas[i].phys_addr;
		size = smem_areas[i].size;

		if (base < phys_addr || base + offset >= phys_addr + size)
			continue;

		if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
				(uintptr_t)smem_areas[i].virt_addr, offset)) {
			SMEM_INFO("%s: overflow %p %x\n", __func__,
				  smem_areas[i].virt_addr, offset);
			return NULL;
		}

		return smem_areas[i].virt_addr + offset;
	}

	return NULL;
}

/**
 * smem_virt_to_phys() - Convert SMEM address to physical address.
 *
 * @smem_address: Address of SMEM item (returned by smem_alloc(), etc)
 * @returns: Physical address (or NULL if there is a failure)
 *
 * This function should only be used if an SMEM item needs to be handed
 * off to a DMA engine. This function will not return a version of EPROBE_DEFER
 * if the driver is not ready since the caller should obtain @smem_address from
 * one of the other public APIs and get EPROBE_DEFER at that time, if
 * applicable.
 */
phys_addr_t smem_virt_to_phys(void *smem_address)
{
	phys_addr_t phys_addr = 0;
	int i;
	void *vend;

	if (!smem_areas)
		return phys_addr;

	for (i = 0; i < num_smem_areas; ++i) {
		vend = (void *)(smem_areas[i].virt_addr + smem_areas[i].size);

		if (smem_address >= smem_areas[i].virt_addr &&
		    smem_address < vend) {
			phys_addr = smem_address - smem_areas[i].virt_addr;
			phys_addr += smem_areas[i].phys_addr;
			break;
		}
	}

	return phys_addr;
}
EXPORT_SYMBOL(smem_virt_to_phys);
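
/*
 * Usage sketch (hypothetical caller, not part of this driver): look up an
 * existing item and translate it for a DMA engine. The host and the error
 * handling are illustrative assumptions.
 */
static int __maybe_unused example_dma_handoff(unsigned int id)
{
	unsigned int size;
	void *item;
	phys_addr_t pa;

	item = smem_get_entry(id, &size, SMEM_MODEM, 0);
	if (IS_ERR_OR_NULL(item))
		return item ? PTR_ERR(item) : -ENOENT;

	pa = smem_virt_to_phys(item);
	if (!pa)	/* item not within any known SMEM region */
		return -EFAULT;

	/* program @pa and @size into the DMA engine here */
	return 0;
}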

/**
 * __smem_get_entry_nonsecure - Get pointer and size of existing SMEM item
 *
 * @id: ID of SMEM item
 * @size: Pointer to size variable for storing the result
 * @skip_init_check: True means do not verify that SMEM has been initialized
 * @use_rspinlock: True to use the remote spinlock
 * @returns: Pointer to SMEM item or NULL if it doesn't exist
 */
static void *__smem_get_entry_nonsecure(unsigned int id, unsigned int *size,
		bool skip_init_check, bool use_rspinlock)
{
	struct smem_shared *shared = smem_ram_base;
	struct smem_heap_entry *toc = shared->heap_toc;
	int use_spinlocks = spinlocks_initialized && use_rspinlock;
	void *ret = 0;
	unsigned long flags = 0;
	int rc;

	if (!skip_init_check && !smem_initialized_check())
		return ret;

	if (id >= smem_max_items)
		return ret;

	if (use_spinlocks) {
		do {
			rc = remote_spin_trylock_irqsave(&remote_spinlock,
							 flags);
		} while (!rc);
	}
	/* toc is in device memory and cannot be speculatively accessed */
	if (toc[id].allocated) {
		phys_addr_t phys_base;

		*size = toc[id].size;
		barrier();

		phys_base = toc[id].reserved & BASE_ADDR_MASK;
		if (!phys_base)
			phys_base = smem_ram_phys;
		ret = smem_phys_to_virt(phys_base, toc[id].offset);
	} else {
		*size = 0;
	}
	if (use_spinlocks)
		remote_spin_unlock_irqrestore(&remote_spinlock, flags);

	return ret;
}

/**
 * __smem_get_entry_secure - Get pointer and size of existing SMEM item with
 *				security support
 *
 * @id: ID of SMEM item
 * @size: Pointer to size variable for storing the result
 * @to_proc: SMEM host that shares the item with apps
 * @flags: Item attribute flags
 * @skip_init_check: True means do not verify that SMEM has been initialized
 * @use_rspinlock: True to use the remote spinlock
 * @returns: Pointer to SMEM item or NULL if it doesn't exist
 */
static void *__smem_get_entry_secure(unsigned int id,
				     unsigned int *size,
				     unsigned int to_proc,
				     unsigned int flags,
				     bool skip_init_check,
				     bool use_rspinlock)
{
	struct smem_partition_header *hdr;
	unsigned long lflags = 0;
	void *item = NULL;
	struct smem_partition_allocation_header *alloc_hdr;
	uint32_t partition_num;
	uint32_t a_hdr_size;
	int rc;

	SMEM_DBG("%s(%u, %u, %u, %u, %d, %d)\n", __func__, id, *size, to_proc,
		 flags, skip_init_check, use_rspinlock);

	if (!skip_init_check && !smem_initialized_check())
		return NULL;

	if (id >= smem_max_items) {
		SMEM_INFO("%s: invalid id %d\n", __func__, id);
		return NULL;
	}

	if (!(flags & SMEM_ANY_HOST_FLAG) && to_proc >= NUM_SMEM_SUBSYSTEMS) {
		SMEM_INFO("%s: id %u invalid to_proc %d\n", __func__, id,
			  to_proc);
		return NULL;
	}

	if (flags & SMEM_ANY_HOST_FLAG || !partitions[to_proc].offset) {
		if (use_comm_partition) {
			partition_num = comm_partition.partition_num;
			hdr = smem_areas[0].virt_addr + comm_partition.offset;
		} else {
			return __smem_get_entry_nonsecure(id, size,
					skip_init_check, use_rspinlock);
		}
	} else {
		partition_num = partitions[to_proc].partition_num;
		hdr = smem_areas[0].virt_addr + partitions[to_proc].offset;
	}
	if (unlikely(!spinlocks_initialized)) {
		rc = init_smem_remote_spinlock();
		if (unlikely(rc)) {
			SMEM_INFO(
				"%s: id:%u remote spinlock init failed %d\n",
				__func__, id, rc);
			return NULL;
		}
	}
	if (use_rspinlock) {
		do {
			rc = remote_spin_trylock_irqsave(&remote_spinlock,
							 lflags);
		} while (!rc);
	}
	if (hdr->identifier != SMEM_PART_HDR_IDENTIFIER) {
		LOG_ERR(
			"%s: SMEM corruption detected. Partition %d to %d at %p\n",
			__func__,
			partition_num,
			to_proc,
			hdr);
		BUG();
	}

	if (flags & SMEM_ITEM_CACHED_FLAG) {
		a_hdr_size = ALIGN(sizeof(*alloc_hdr),
				   partitions[to_proc].size_cacheline);
		for (alloc_hdr = (void *)(hdr) + hdr->size - a_hdr_size;
				(void *)(alloc_hdr) > (void *)(hdr) +
					hdr->offset_free_cached;
				alloc_hdr = (void *)(alloc_hdr) -
					alloc_hdr->size - a_hdr_size) {
			if (alloc_hdr->canary != SMEM_ALLOCATION_CANARY) {
				LOG_ERR(
					"%s: SMEM corruption detected. Partition %d to %d at %p\n",
					__func__,
					partition_num,
					to_proc,
					alloc_hdr);
				BUG();
			}
			if (alloc_hdr->smem_type == id) {
				/* 8 byte alignment to match legacy */
				*size = ALIGN(alloc_hdr->size -
						alloc_hdr->padding_data, 8);
				item = (void *)(alloc_hdr) - alloc_hdr->size;
				break;
			}
		}
	} else {
		for (alloc_hdr = (void *)(hdr) + sizeof(*hdr);
				(void *)(alloc_hdr) < (void *)(hdr) +
					hdr->offset_free_uncached;
				alloc_hdr = (void *)(alloc_hdr) +
						sizeof(*alloc_hdr) +
						alloc_hdr->padding_hdr +
						alloc_hdr->size) {
			if (alloc_hdr->canary != SMEM_ALLOCATION_CANARY) {
				LOG_ERR(
					"%s: SMEM corruption detected. Partition %d to %d at %p\n",
					__func__,
					partition_num,
					to_proc,
					alloc_hdr);
				BUG();
			}
			if (alloc_hdr->smem_type == id) {
				/* 8 byte alignment to match legacy */
				*size = ALIGN(alloc_hdr->size -
						alloc_hdr->padding_data, 8);
				item = (void *)(alloc_hdr) +
					sizeof(*alloc_hdr) +
					alloc_hdr->padding_hdr;
				break;
			}
		}
	}
	if (use_rspinlock)
		remote_spin_unlock_irqrestore(&remote_spinlock, lflags);

	return item;
}

static void *__smem_find(unsigned int id, unsigned int size_in,
			 bool skip_init_check)
{
	unsigned int size;
	void *ptr;

	ptr = __smem_get_entry_nonsecure(id, &size, skip_init_check, true);
	if (!ptr)
		return 0;

	size_in = ALIGN(size_in, 8);
	if (size_in != size) {
		SMEM_INFO("smem_find(%u, %u): wrong size %u\n",
			  id, size_in, size);
		return 0;
	}

	return ptr;
}

/**
 * smem_find - Find existing item with security support
 *
 * @id: ID of SMEM item
 * @size_in: Size of the SMEM item
 * @to_proc: SMEM host that shares the item with apps
 * @flags: Item attribute flags
 * @returns: Pointer to SMEM item, NULL if it doesn't exist, or -EPROBE_DEFER
 *	if the driver is not ready
 */
void *smem_find(unsigned int id, unsigned int size_in, unsigned int to_proc,
		unsigned int flags)
{
	unsigned int size;
	void *ptr;

	SMEM_DBG("%s(%u, %u, %u, %u)\n", __func__, id, size_in, to_proc,
		 flags);

	/*
	 * Handle the circular dependency between SMEM and the software-
	 * implemented remote spinlocks: SMEM must initialize those spinlocks
	 * in probe() before probe_done is set. EPROBE_DEFER handling will
	 * not resolve this code path, so the spinlock item must be treated
	 * as a special case.
	 */
	if (!is_probe_done() && id != SMEM_SPINLOCK_ARRAY)
		return ERR_PTR(-EPROBE_DEFER);

	ptr = smem_get_entry(id, &size, to_proc, flags);
	if (!ptr)
		return 0;

	size_in = ALIGN(size_in, 8);
	if (size_in != size) {
		SMEM_INFO("smem_find(%u, %u, %u, %u): wrong size %u\n",
			  id, size_in, to_proc, flags, size);
		return 0;
	}

	return ptr;
}
EXPORT_SYMBOL(smem_find);
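
/*
 * Usage sketch (hypothetical caller): find an existing 64-byte item shared
 * with the modem, deferring until SMEM has probed. The size and host are
 * illustrative assumptions.
 */
static void * __maybe_unused example_find_item(unsigned int id)
{
	void *item = smem_find(id, 64, SMEM_MODEM, 0);

	if (IS_ERR(item))	/* -EPROBE_DEFER: SMEM not probed yet */
		return NULL;
	return item;		/* NULL if the item does not exist */
}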

/**
 * alloc_item_nonsecure - Allocate an SMEM item in the nonsecure partition
 *
 * @id: ID of SMEM item
 * @size_in: Size to allocate
 * @returns: Pointer to SMEM item or NULL for error
 *
 * Assumes the id parameter is valid and does not already exist. Assumes
 * size_in is already adjusted for alignment, if necessary. Requires the
 * remote spinlock to already be locked.
 */
static void *alloc_item_nonsecure(unsigned int id, unsigned int size_in)
{
	void *smem_base = smem_ram_base;
	struct smem_shared *shared = smem_base;
	struct smem_heap_entry *toc = shared->heap_toc;
	void *ret = NULL;

	if (shared->heap_info.heap_remaining >= size_in) {
		toc[id].offset = shared->heap_info.free_offset;
		toc[id].size = size_in;
		/*
		 * wmb() is necessary to ensure the allocation data is
		 * consistent before setting the allocated flag to prevent race
		 * conditions with remote processors
		 */
		wmb();
		toc[id].allocated = 1;

		shared->heap_info.free_offset += size_in;
		shared->heap_info.heap_remaining -= size_in;
		ret = smem_base + toc[id].offset;
		/*
		 * wmb() is necessary to ensure the heap data is consistent
		 * before continuing to prevent race conditions with remote
		 * processors
		 */
		wmb();
	} else {
		SMEM_INFO("%s: id %u not enough memory %u (required %u)\n",
			  __func__, id, shared->heap_info.heap_remaining,
			  size_in);
	}

	return ret;
}

/**
 * alloc_item_secure - Allocate an SMEM item in a secure partition
 *
 * @id: ID of SMEM item
 * @size_in: Size to allocate
 * @to_proc: SMEM host that shares the item with apps
 * @flags: Item attribute flags
 * @returns: Pointer to SMEM item or NULL for error
 *
 * Assumes the id parameter is valid and does not already exist. Assumes
 * size_in is the raw size requested by the client. Assumes to_proc is a valid
 * host, and a valid partition to that host exists. Requires the remote
 * spinlock to already be locked.
 */
static void *alloc_item_secure(unsigned int id, unsigned int size_in,
			       unsigned int to_proc, unsigned int flags)
{
	void *smem_base = smem_ram_base;
	struct smem_partition_header *hdr;
	struct smem_partition_allocation_header *alloc_hdr;
	uint32_t a_hdr_size;
	uint32_t a_data_size;
	uint32_t size_cacheline;
	uint32_t free_space;
	uint32_t partition_num;
	void *ret = NULL;

	if (to_proc == SMEM_COMM_HOST) {
		hdr = smem_base + comm_partition.offset;
		partition_num = comm_partition.partition_num;
		size_cacheline = comm_partition.size_cacheline;
	} else if (to_proc < NUM_SMEM_SUBSYSTEMS) {
		hdr = smem_base + partitions[to_proc].offset;
		partition_num = partitions[to_proc].partition_num;
		size_cacheline = partitions[to_proc].size_cacheline;
	} else {
		SMEM_INFO("%s: invalid to_proc %u for id %u\n", __func__,
			  to_proc, id);
		return NULL;
	}

	if (hdr->identifier != SMEM_PART_HDR_IDENTIFIER) {
		LOG_ERR(
			"%s: SMEM corruption detected. Partition %d to %d at %p\n",
			__func__,
			partition_num,
			to_proc,
			hdr);
		BUG();
	}

	free_space = hdr->offset_free_cached -
		     hdr->offset_free_uncached;

	if (flags & SMEM_ITEM_CACHED_FLAG) {
		a_hdr_size = ALIGN(sizeof(*alloc_hdr), size_cacheline);
		a_data_size = ALIGN(size_in, size_cacheline);
		if (free_space < a_hdr_size + a_data_size) {
			SMEM_INFO(
				"%s: id %u not enough memory %u (required %u)\n",
				__func__, id, free_space,
				a_hdr_size + a_data_size);
			return ret;
		}
		alloc_hdr = (void *)(hdr) + hdr->offset_free_cached -
			    a_hdr_size;
		alloc_hdr->canary = SMEM_ALLOCATION_CANARY;
		alloc_hdr->smem_type = id;
		alloc_hdr->size = a_data_size;
		alloc_hdr->padding_data = a_data_size - size_in;
		alloc_hdr->padding_hdr = a_hdr_size - sizeof(*alloc_hdr);
		hdr->offset_free_cached = hdr->offset_free_cached -
					  a_hdr_size - a_data_size;
		ret = (void *)(alloc_hdr) - a_data_size;
		/*
		 * The SMEM protocol currently does not support cacheable
		 * areas within the smem region, but if it ever does in the
		 * future, then cache management needs to be done here.
		 * The area of memory this item is allocated from will need
		 * to be dynamically made cacheable, and a cache flush of the
		 * allocation header using __cpuc_flush_dcache_area and
		 * outer_flush_area will need to be done.
		 */
	} else {
		a_hdr_size = sizeof(*alloc_hdr);
		a_data_size = ALIGN(size_in, 8);
		if (free_space < a_hdr_size + a_data_size) {
			SMEM_INFO(
				"%s: id %u not enough memory %u (required %u)\n",
				__func__, id, free_space,
				a_hdr_size + a_data_size);
			return ret;
		}
		alloc_hdr = (void *)(hdr) + hdr->offset_free_uncached;
		alloc_hdr->canary = SMEM_ALLOCATION_CANARY;
		alloc_hdr->smem_type = id;
		alloc_hdr->size = a_data_size;
		alloc_hdr->padding_data = a_data_size - size_in;
		alloc_hdr->padding_hdr = a_hdr_size - sizeof(*alloc_hdr);
		hdr->offset_free_uncached = hdr->offset_free_uncached +
					    a_hdr_size + a_data_size;
		ret = alloc_hdr + 1;
	}
	/*
	 * wmb() is necessary to ensure the heap and allocation data is
	 * consistent before continuing to prevent race conditions with remote
	 * processors
	 */
	wmb();

	return ret;
}

/**
 * smem_alloc - Find an existing item, otherwise allocate it with security
 *		support
 *
 * @id: ID of SMEM item
 * @size_in: Size of the SMEM item
 * @to_proc: SMEM host that shares the item with apps
 * @flags: Item attribute flags
 * @returns: Pointer to SMEM item, NULL if it couldn't be found/allocated,
 *	or -EPROBE_DEFER if the driver is not ready
 */
void *smem_alloc(unsigned int id, unsigned int size_in, unsigned int to_proc,
		 unsigned int flags)
{
	unsigned long lflags;
	void *ret = NULL;
	int rc;
	unsigned int size_out;
	unsigned int a_size_in;

	SMEM_DBG("%s(%u, %u, %u, %u)\n", __func__, id, size_in, to_proc,
		 flags);

	if (!is_probe_done())
		return ERR_PTR(-EPROBE_DEFER);

	if (!smem_initialized_check())
		return NULL;

	if (id >= smem_max_items) {
		SMEM_INFO("%s: invalid id %u\n", __func__, id);
		return NULL;
	}

	if (!(flags & SMEM_ANY_HOST_FLAG) && to_proc >= NUM_SMEM_SUBSYSTEMS) {
		SMEM_INFO("%s: invalid to_proc %u for id %u\n", __func__,
			  to_proc, id);
		return NULL;
	}

	if (unlikely(!spinlocks_initialized)) {
		rc = init_smem_remote_spinlock();
		if (unlikely(rc)) {
			SMEM_INFO("%s: id:%u remote spinlock init failed %d\n",
				  __func__, id, rc);
			return NULL;
		}
	}

	a_size_in = ALIGN(size_in, 8);
	do {
		rc = remote_spin_trylock_irqsave(&remote_spinlock, lflags);
	} while (!rc);

	ret = __smem_get_entry_secure(id, &size_out, to_proc, flags, true,
				      false);
	if (ret) {
		SMEM_INFO("%s: %u already allocated\n", __func__, id);
		if (a_size_in == size_out) {
			remote_spin_unlock_irqrestore(&remote_spinlock, lflags);
			return ret;
		}
		remote_spin_unlock_irqrestore(&remote_spinlock, lflags);
		SMEM_INFO("%s: id %u wrong size %u (expected %u)\n",
			  __func__, id, size_out, a_size_in);
		return NULL;
	}

	if (id > SMEM_FIXED_ITEM_LAST) {
		SMEM_INFO("%s: allocating %u size %u to_proc %u flags %u\n",
			  __func__, id, size_in, to_proc, flags);
		if (flags & SMEM_ANY_HOST_FLAG
		    || !partitions[to_proc].offset) {
			if (use_comm_partition)
				ret = alloc_item_secure(id, size_in,
						SMEM_COMM_HOST, flags);
			else
				ret = alloc_item_nonsecure(id, a_size_in);
		} else {
			ret = alloc_item_secure(id, size_in, to_proc, flags);
		}
	} else {
		SMEM_INFO("%s: attempted to allocate non-dynamic item %u\n",
			  __func__, id);
	}

	remote_spin_unlock_irqrestore(&remote_spinlock, lflags);
	return ret;
}
EXPORT_SYMBOL(smem_alloc);
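
/*
 * Usage sketch (hypothetical caller): find-or-allocate an item and handle
 * all three documented outcomes. The size and host are illustrative
 * assumptions.
 */
static int __maybe_unused example_alloc_item(unsigned int id, void **out)
{
	void *item = smem_alloc(id, 64, SMEM_MODEM, 0);

	if (IS_ERR(item))	/* typically -EPROBE_DEFER */
		return PTR_ERR(item);
	if (!item)		/* exists with wrong size, or out of space */
		return -ENOMEM;
	*out = item;
	return 0;
}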

/**
 * smem_get_entry - Get existing item with security support
 *
 * @id: ID of SMEM item
 * @size: Pointer to size variable for storing the result
 * @to_proc: SMEM host that shares the item with apps
 * @flags: Item attribute flags
 * @returns: Pointer to SMEM item, NULL if it doesn't exist, or -EPROBE_DEFER
 *	if the driver isn't ready
 */
void *smem_get_entry(unsigned int id, unsigned int *size, unsigned int to_proc,
		     unsigned int flags)
{
	SMEM_DBG("%s(%u, %u, %u, %u)\n", __func__, id, *size, to_proc, flags);

	/*
	 * Handle the circular dependency between SMEM and the software-
	 * implemented remote spinlocks: SMEM must initialize those spinlocks
	 * in probe() before probe_done is set. EPROBE_DEFER handling will
	 * not resolve this code path, so the spinlock item must be treated
	 * as a special case.
	 */
	if (!is_probe_done() && id != SMEM_SPINLOCK_ARRAY)
		return ERR_PTR(-EPROBE_DEFER);

	return __smem_get_entry_secure(id, size, to_proc, flags, false, true);
}
EXPORT_SYMBOL(smem_get_entry);

/**
 * smem_get_entry_no_rlock - Get existing item without using remote spinlock
 *
 * @id: ID of SMEM item
 * @size_out: Pointer to size variable for storing the result
 * @to_proc: SMEM host that shares the item with apps
 * @flags: Item attribute flags
 * @returns: Pointer to SMEM item, NULL if it doesn't exist, or -EPROBE_DEFER
 *	if the driver isn't ready
 *
 * This function does not lock the remote spinlock and should only be used in
 * failure-recovery cases such as retrieving the subsystem failure reason
 * during subsystem restart.
 */
void *smem_get_entry_no_rlock(unsigned int id, unsigned int *size_out,
			      unsigned int to_proc, unsigned int flags)
{
	if (!is_probe_done())
		return ERR_PTR(-EPROBE_DEFER);

	return __smem_get_entry_secure(id, size_out, to_proc, flags, false,
				       false);
}
EXPORT_SYMBOL(smem_get_entry_no_rlock);
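
/*
 * Usage sketch (hypothetical SSR handler): read a failure-reason item while
 * the remote host may still hold the remote spinlock. The item ID and log
 * format are illustrative assumptions.
 */
static void __maybe_unused example_log_crash_reason(unsigned int id)
{
	unsigned int size;
	void *reason = smem_get_entry_no_rlock(id, &size, SMEM_MODEM, 0);

	if (!IS_ERR_OR_NULL(reason))
		pr_err("subsystem failure reason: %.*s\n",
		       (int)size, (char *)reason);
}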

/**
 * smem_get_remote_spinlock - Remote spinlock pointer for unit testing.
 *
 * @returns: pointer to SMEM remote spinlock
 */
remote_spinlock_t *smem_get_remote_spinlock(void)
{
	if (unlikely(!spinlocks_initialized))
		init_smem_remote_spinlock();
	return &remote_spinlock;
}
EXPORT_SYMBOL(smem_get_remote_spinlock);

/**
 * smem_get_free_space() - Get the available allocation free space for a
 *				partition
 *
 * @to_proc: remote SMEM host. Determines the applicable partition
 * @returns: size in bytes available to allocate
 *
 * Helper function for SMD so that SMD only scans the channel allocation
 * table for a partition when it is reasonably certain that a channel has
 * actually been created, because scanning can be expensive. Creating a channel
 * will consume some of the free space in a partition, so SMD can compare the
 * last free space size against the current free space size to determine if
 * a channel may have been created. SMD can't do this directly, because the
 * necessary partition internals are restricted to just SMEM.
 */
unsigned int smem_get_free_space(unsigned int to_proc)
{
	struct smem_partition_header *hdr;
	struct smem_shared *shared;

	if (to_proc >= NUM_SMEM_SUBSYSTEMS) {
		pr_err("%s: invalid to_proc:%d\n", __func__, to_proc);
		return UINT_MAX;
	}

	if (partitions[to_proc].offset) {
		if (unlikely(OVERFLOW_ADD_UNSIGNED(uintptr_t,
				(uintptr_t)smem_areas[0].virt_addr,
				partitions[to_proc].offset))) {
			pr_err("%s: unexpected overflow detected\n", __func__);
			return UINT_MAX;
		}
		hdr = smem_areas[0].virt_addr + partitions[to_proc].offset;
		return hdr->offset_free_cached - hdr->offset_free_uncached;
	}
	shared = smem_ram_base;
	return shared->heap_info.heap_remaining;
}
EXPORT_SYMBOL(smem_get_free_space);
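
/*
 * Usage sketch (hypothetical SMD-style caller): rescan the allocation table
 * only when a partition's free space has shrunk, per the rationale above.
 */
static bool __maybe_unused example_space_changed(unsigned int to_proc,
						 unsigned int *last_free)
{
	unsigned int free_space = smem_get_free_space(to_proc);

	if (free_space == UINT_MAX)	/* invalid host */
		return false;
	if (free_space == *last_free)
		return false;		/* nothing allocated since last scan */
	*last_free = free_space;
	return true;
}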

/**
 * smem_get_version() - Get the smem user version number
 *
 * @idx: SMEM user idx in SMEM_VERSION_INFO table.
 * @returns: smem version number on success, otherwise zero.
 */
unsigned int smem_get_version(unsigned int idx)
{
	int *version_array;
	struct smem_shared *smem = smem_ram_base;

	/* the version table holds 32 entries, so idx 32 is out of bounds */
	if (idx >= 32) {
		pr_err("%s: invalid idx:%d\n", __func__, idx);
		return 0;
	}

	if (use_comm_partition)
		version_array = smem->version;
	else
		version_array = __smem_find(SMEM_VERSION_INFO,
					    SMEM_VERSION_INFO_SIZE, true);
	if (version_array == NULL)
		return 0;

	return version_array[idx];
}
EXPORT_SYMBOL(smem_get_version);
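
/*
 * Usage sketch (hypothetical caller): check a user's major version, which
 * the SBL check in smem_initialized_check() below suggests lives in the
 * upper 16 bits of the version word.
 */
static bool __maybe_unused example_version_matches(unsigned int idx,
						   unsigned int major)
{
	return (smem_get_version(idx) >> 16) == major;
}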

/**
 * init_smem_remote_spinlock - Reentrant remote spinlock initialization
 *
 * @returns: success or error code for failure
 */
static int init_smem_remote_spinlock(void)
{
	int rc = 0;

	/*
	 * Optimistic locking. Init only needs to be done once by the first
	 * caller. After that, serializing inits between different callers
	 * is unnecessary. The second check after the lock ensures init
	 * wasn't previously completed by someone else before the lock could
	 * be grabbed.
	 */
	if (!spinlocks_initialized) {
		mutex_lock(&spinlock_init_lock);
		if (!spinlocks_initialized) {
			rc = remote_spin_lock_init(&remote_spinlock,
						   SMEM_SPINLOCK_SMEM_ALLOC);
			if (!rc)
				spinlocks_initialized = 1;
		}
		mutex_unlock(&spinlock_init_lock);
	}
	return rc;
}

/**
 * smem_initialized_check - Reentrant check that smem has been initialized
 *
 * @returns: true if initialized, false if not.
 */
bool smem_initialized_check(void)
{
	static int checked;
	static int is_inited;
	unsigned long flags;
	struct smem_shared *smem;
	unsigned int ver;

	if (likely(checked)) {
		if (unlikely(!is_inited))
			LOG_ERR("%s: smem not initialized\n", __func__);
		return is_inited;
	}

	spin_lock_irqsave(&smem_init_check_lock, flags);
	if (checked) {
		spin_unlock_irqrestore(&smem_init_check_lock, flags);
		if (unlikely(!is_inited))
			LOG_ERR("%s: smem not initialized\n", __func__);
		return is_inited;
	}

	smem = smem_ram_base;

	if (smem->heap_info.initialized != 1)
		goto failed;
	if (smem->heap_info.reserved != 0)
		goto failed;

	/*
	 * The Modem SBL is now the Master SBL version and is required to
	 * pre-initialize SMEM and fill in any necessary configuration
	 * structures. Without the extra configuration data, the SMEM driver
	 * cannot be properly initialized.
	 */
	ver = smem->version[MODEM_SBL_VERSION_INDEX];
	if (ver == SMEM_COMM_PART_VERSION << 16) {
		use_comm_partition = true;
	} else if (ver != SMEM_VERSION << 16) {
		pr_err("%s: SBL version not correct 0x%x\n",
		       __func__, ver);
		goto failed;
	}

	is_inited = 1;
	checked = 1;
	spin_unlock_irqrestore(&smem_init_check_lock, flags);
	return is_inited;

failed:
	is_inited = 0;
	checked = 1;
	spin_unlock_irqrestore(&smem_init_check_lock, flags);
	LOG_ERR(
		"%s: shared memory needs to be initialized by SBL before booting\n",
		__func__);
	return is_inited;
}
EXPORT_SYMBOL(smem_initialized_check);

static int restart_notifier_cb(struct notifier_block *this,
			       unsigned long code,
			       void *data)
{
	struct restart_notifier_block *notifier;
	struct notif_data *notifdata = data;
	int ret;

	switch (code) {

	case SUBSYS_AFTER_SHUTDOWN:
		notifier = container_of(this,
					struct restart_notifier_block, nb);
		SMEM_INFO("%s: ssrestart for processor %d ('%s')\n",
			  __func__, notifier->processor,
			  notifier->name);
		remote_spin_release(&remote_spinlock, notifier->processor);
		remote_spin_release_all(notifier->processor);
		break;
	case SUBSYS_SOC_RESET:
		if (!(smem_ramdump_dev && notifdata->enable_mini_ramdumps))
			break;
		/* else fall through and dump as for a ramdump notification */
	case SUBSYS_RAMDUMP_NOTIFICATION:
		if (!(smem_ramdump_dev && (notifdata->enable_mini_ramdumps
						|| notifdata->enable_ramdump)))
			break;
		SMEM_DBG("%s: saving ramdump\n", __func__);
		/*
		 * XPU protection does not currently allow the
		 * auxiliary memory regions to be dumped. If this
		 * changes, then num_smem_areas + 1 should be passed
		 * into do_elf_ramdump() to dump all regions.
		 */
		ret = do_elf_ramdump(smem_ramdump_dev,
				     smem_ramdump_segments, 1);
		if (ret < 0)
			LOG_ERR("%s: unable to dump smem %d\n", __func__, ret);
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

static __init int modem_restart_late_init(void)
{
	int i;
	void *handle;
	struct restart_notifier_block *nb;

	smem_ramdump_dev = create_ramdump_device("smem", NULL);
	if (IS_ERR_OR_NULL(smem_ramdump_dev)) {
		LOG_ERR("%s: Unable to create smem ramdump device.\n",
			__func__);
		smem_ramdump_dev = NULL;
	}

	for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
		nb = &restart_notifiers[i];
		handle = subsys_notif_register_notifier(nb->name, &nb->nb);
		SMEM_DBG("%s: registering notif for '%s', handle=%p\n",
			 __func__, nb->name, handle);
	}

	return 0;
}
late_initcall(modem_restart_late_init);

int smem_module_init_notifier_register(struct notifier_block *nb)
{
	int ret;

	if (!nb)
		return -EINVAL;
	mutex_lock(&smem_module_init_notifier_lock);
	ret = raw_notifier_chain_register(&smem_module_init_notifier_list, nb);
	if (smem_module_inited)
		nb->notifier_call(nb, 0, NULL);
	mutex_unlock(&smem_module_init_notifier_lock);
	return ret;
}
EXPORT_SYMBOL(smem_module_init_notifier_register);
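
/*
 * Usage sketch (hypothetical client): run a callback once SMEM is up. If
 * SMEM initialized first, the callback fires immediately at registration.
 * The names below are illustrative.
 */
static int __maybe_unused example_smem_up_cb(struct notifier_block *nb,
					     unsigned long state, void *data)
{
	/* safe to call smem_alloc()/smem_find() from here */
	return NOTIFY_DONE;
}

static struct notifier_block example_smem_nb __maybe_unused = {
	.notifier_call = example_smem_up_cb,
};
/* ... then: smem_module_init_notifier_register(&example_smem_nb); */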

int smem_module_init_notifier_unregister(struct notifier_block *nb)
{
	int ret;

	if (!nb)
		return -EINVAL;
	mutex_lock(&smem_module_init_notifier_lock);
	ret = raw_notifier_chain_unregister(&smem_module_init_notifier_list,
					    nb);
	mutex_unlock(&smem_module_init_notifier_lock);
	return ret;
}
EXPORT_SYMBOL(smem_module_init_notifier_unregister);

static void smem_module_init_notify(uint32_t state, void *data)
{
	mutex_lock(&smem_module_init_notifier_lock);
	smem_module_inited = 1;
	raw_notifier_call_chain(&smem_module_init_notifier_list,
				state, data);
	mutex_unlock(&smem_module_init_notifier_lock);
}

/**
 * smem_init_security_partition - Init local structures for a secured smem
 *				partition that has apps as one of the hosts
 *
 * @entry: Entry in the security TOC for the partition to init
 * @num: Partition ID
 *
 * Initialize local data structures to point to a secured smem partition
 * that is accessible by apps and another processor. Assumes that one of the
 * listed hosts is apps. Verifies that the partition is valid, otherwise it
 * will be skipped. Checks for memory corruption and will BUG() if detected.
 * Assumes smem_areas is already initialized and that smem_areas[0]
 * corresponds to the smem region with the secured partitions.
 */
static void smem_init_security_partition(struct smem_toc_entry *entry,
					 uint32_t num)
{
	uint16_t remote_host = 0;
	struct smem_partition_header *hdr;
	bool is_comm_partition = false;

	if (!entry->offset) {
		SMEM_INFO("Skipping smem partition %d - bad offset\n", num);
		return;
	}
	if (!entry->size) {
		SMEM_INFO("Skipping smem partition %d - bad size\n", num);
		return;
	}
	if (!entry->size_cacheline) {
		SMEM_INFO("Skipping smem partition %d - bad cacheline\n", num);
		return;
	}

	if (entry->host0 == SMEM_COMM_HOST && entry->host1 == SMEM_COMM_HOST)
		is_comm_partition = true;

	if (!is_comm_partition) {
		if (entry->host0 == SMEM_APPS)
			remote_host = entry->host1;
		else
			remote_host = entry->host0;

		if (remote_host >= NUM_SMEM_SUBSYSTEMS) {
			SMEM_INFO(
				"Skipping smem partition %d - bad remote:%d\n",
				num, remote_host);
			return;
		}
		if (partitions[remote_host].offset) {
			SMEM_INFO(
				"Skipping smem partition %d - duplicate of %d\n",
				num, partitions[remote_host].partition_num);
			return;
		}

		if (entry->host0 != SMEM_APPS && entry->host1 != SMEM_APPS) {
			SMEM_INFO(
				"Non-APSS Partition %d offset:%x host0:%d host1:%d\n",
				num, entry->offset, entry->host0, entry->host1);
			return;
		}
	}

	hdr = smem_areas[0].virt_addr + entry->offset;

	if (hdr->identifier != SMEM_PART_HDR_IDENTIFIER) {
		LOG_ERR("Smem partition %d hdr magic is bad\n", num);
		BUG();
	}
	if (!hdr->size) {
		LOG_ERR("Smem partition %d size is 0\n", num);
		BUG();
	}
	if (hdr->offset_free_uncached > hdr->size) {
		LOG_ERR("Smem partition %d uncached heap exceeds size\n", num);
		BUG();
	}
	if (hdr->offset_free_cached > hdr->size) {
		LOG_ERR("Smem partition %d cached heap exceeds size\n", num);
		BUG();
	}
	if (is_comm_partition) {
		if (hdr->host0 == SMEM_COMM_HOST
		    && hdr->host1 == SMEM_COMM_HOST) {
			comm_partition.partition_num = num;
			comm_partition.offset = entry->offset;
			comm_partition.size_cacheline = entry->size_cacheline;
			SMEM_INFO("Common Partition %d offset:%x\n", num,
				  entry->offset);
		} else {
			LOG_ERR("Smem Comm partition hosts don't match TOC\n");
			WARN_ON(1);
		}
		return;
	}
	if (hdr->host0 != SMEM_APPS && hdr->host1 != SMEM_APPS) {
		LOG_ERR("Smem partition %d hosts don't match TOC\n", num);
		BUG();
	}
	if (hdr->host0 != remote_host && hdr->host1 != remote_host) {
		LOG_ERR("Smem partition %d hosts don't match TOC\n", num);
		BUG();
	}

	partitions[remote_host].partition_num = num;
	partitions[remote_host].offset = entry->offset;
	partitions[remote_host].size_cacheline = entry->size_cacheline;
	SMEM_INFO("Partition %d offset:%x remote:%d\n", num, entry->offset,
		  remote_host);
}

/**
 * smem_init_security - Init local support for secured smem
 *
 * Looks for a valid security TOC, and if one is found, parses it looking for
 * partitions that apps can access. If any such partitions are found, do the
 * required local initialization to support them. Assumes smem_areas is inited
 * and smem_area[0] corresponds to the smem region with the TOC.
 */
static void smem_init_security(void)
{
	struct smem_toc *toc;
	uint32_t i;

	SMEM_DBG("%s\n", __func__);

	toc = smem_areas[0].virt_addr + smem_areas[0].size - 4 * 1024;

	if (toc->identifier != SMEM_TOC_IDENTIFIER) {
		LOG_ERR("%s failed: invalid TOC magic\n", __func__);
		return;
	}

	for (i = 0; i < toc->num_entries; ++i) {
		SMEM_DBG("Partition %d host0:%d host1:%d\n", i,
			 toc->entry[i].host0,
			 toc->entry[i].host1);
		smem_init_security_partition(&toc->entry[i], i);
	}

	SMEM_DBG("%s done\n", __func__);
}

/**
 * smem_init_target_info - Init smem target information
 *
 * @info_addr : smem target info physical address.
 * @size : size of the smem target info structure.
 *
 * This function initializes the smem_targ_info structure and checks for a
 * valid identifier; if the identifier is valid, it initializes the smem
 * variables from the structure.
 */
static int smem_init_target_info(phys_addr_t info_addr, resource_size_t size)
{
	struct smem_targ_info_type *smem_targ_info;
	void *smem_targ_info_addr;

	smem_targ_info_addr = ioremap_nocache(info_addr, size);
	if (!smem_targ_info_addr) {
		LOG_ERR("%s: failed ioremap_nocache() of addr:%pa size:%pa\n",
			__func__, &info_addr, &size);
		return -ENODEV;
	}
	smem_targ_info =
		(struct smem_targ_info_type __iomem *)smem_targ_info_addr;

	if (smem_targ_info->identifier != SMEM_TARG_INFO_IDENTIFIER) {
		LOG_ERR("%s failed: invalid TARGET INFO magic\n", __func__);
		iounmap(smem_targ_info_addr);	/* don't leak the mapping */
		return -ENODEV;
	}
	smem_ram_phys = smem_targ_info->phys_base_addr;
	smem_ram_size = smem_targ_info->size;
	if (smem_targ_info->max_items)
		smem_max_items = smem_targ_info->max_items;
	iounmap(smem_targ_info_addr);
	return 0;
}

static int msm_smem_probe(struct platform_device *pdev)
{
	char *key;
	struct resource *r;
	phys_addr_t aux_mem_base;
	resource_size_t aux_mem_size;
	int temp_string_size = 11; /* max 3 digit count */
	char temp_string[temp_string_size];
	int ret;
	struct ramdump_segment *ramdump_segments_tmp = NULL;
	struct smem_area *smem_areas_tmp = NULL;
	int smem_idx = 0;
	bool security_enabled;

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					 "smem_targ_info_imem");
	if (r) {
		if (smem_init_target_info(r->start, resource_size(r)))
			goto smem_targ_info_legacy;
		goto smem_targ_info_done;
	}

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					 "smem_targ_info_reg");
	if (r) {
		void *reg_base_addr;
		uint64_t base_addr;

		reg_base_addr = ioremap_nocache(r->start, resource_size(r));
		base_addr = (uint32_t)readl_relaxed(reg_base_addr);
		base_addr |=
			((uint64_t)readl_relaxed(reg_base_addr + 0x4) << 32);
		iounmap(reg_base_addr);
		if ((base_addr == 0) || ((base_addr >> 32) != 0)) {
			SMEM_INFO("%s: Invalid SMEM address\n", __func__);
			goto smem_targ_info_legacy;
		}
		if (smem_init_target_info(base_addr,
				sizeof(struct smem_targ_info_type)))
			goto smem_targ_info_legacy;
		goto smem_targ_info_done;
	}

smem_targ_info_legacy:
	SMEM_INFO("%s: reading dt-specified SMEM address\n", __func__);
	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smem");
	if (r) {
		smem_ram_size = resource_size(r);
		smem_ram_phys = r->start;
	}

smem_targ_info_done:
	if (!smem_ram_phys || !smem_ram_size) {
		LOG_ERR("%s: Missing SMEM TARGET INFO\n", __func__);
		return -ENODEV;
	}

	smem_ram_base = ioremap_nocache(smem_ram_phys, smem_ram_size);

	if (!smem_ram_base) {
		LOG_ERR("%s: ioremap_nocache() of addr:%pa size: %pa\n",
			__func__,
			&smem_ram_phys, &smem_ram_size);
		return -ENODEV;
	}

	if (!smem_initialized_check())
		return -ENODEV;

	/*
	 * The software-implemented remote spinlock requires smem_find(),
	 * which needs smem_ram_base to be initialized. The remote spinlock
	 * item is guaranteed to be allocated by the bootloader, so this is
	 * the safest and earliest place to init the spinlock.
	 */
	ret = init_smem_remote_spinlock();
	if (ret) {
		LOG_ERR("%s: remote spinlock init failed %d\n", __func__, ret);
		return ret;
	}

	key = "irq-reg-base";
	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
	if (!r) {
		LOG_ERR("%s: missing '%s'\n", __func__, key);
		return -ENODEV;
	}

	num_smem_areas = 1;
	while (1) {
		scnprintf(temp_string, temp_string_size, "aux-mem%d",
			  num_smem_areas);
		r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						 temp_string);
		if (!r)
			break;

		++num_smem_areas;
		if (num_smem_areas > 999) {
			LOG_ERR("%s: max num aux mem regions reached\n",
				__func__);
			break;
		}
	}
	/* Initialize main SMEM region and SSR ramdump region */
	smem_areas_tmp = kmalloc_array(num_smem_areas, sizeof(struct smem_area),
				       GFP_KERNEL);
	if (!smem_areas_tmp) {
		LOG_ERR("%s: smem areas kmalloc failed\n", __func__);
		ret = -ENOMEM;
		goto free_smem_areas;
	}

	ramdump_segments_tmp = kcalloc(num_smem_areas,
				       sizeof(struct ramdump_segment),
				       GFP_KERNEL);
	if (!ramdump_segments_tmp) {
		LOG_ERR("%s: ramdump segment kmalloc failed\n", __func__);
		ret = -ENOMEM;
		goto free_smem_areas;
	}
	smem_areas_tmp[smem_idx].phys_addr = smem_ram_phys;
	smem_areas_tmp[smem_idx].size = smem_ram_size;
	smem_areas_tmp[smem_idx].virt_addr = smem_ram_base;

	ramdump_segments_tmp[smem_idx].address = smem_ram_phys;
	ramdump_segments_tmp[smem_idx].size = smem_ram_size;
	++smem_idx;

	/* Configure auxiliary SMEM regions */
	while (1) {
		scnprintf(temp_string, temp_string_size, "aux-mem%d",
			  smem_idx);
		r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						 temp_string);
		if (!r)
			break;
		aux_mem_base = r->start;
		aux_mem_size = resource_size(r);

		ramdump_segments_tmp[smem_idx].address = aux_mem_base;
		ramdump_segments_tmp[smem_idx].size = aux_mem_size;

		smem_areas_tmp[smem_idx].phys_addr = aux_mem_base;
		smem_areas_tmp[smem_idx].size = aux_mem_size;
		smem_areas_tmp[smem_idx].virt_addr = ioremap_nocache(
			(unsigned long)(smem_areas_tmp[smem_idx].phys_addr),
			smem_areas_tmp[smem_idx].size);
		SMEM_DBG("%s: %s = %pa %pa -> %p", __func__, temp_string,
			 &aux_mem_base, &aux_mem_size,
			 smem_areas_tmp[smem_idx].virt_addr);

		if (!smem_areas_tmp[smem_idx].virt_addr) {
			LOG_ERR("%s: ioremap_nocache() of addr:%pa size: %pa\n",
				__func__,
				&smem_areas_tmp[smem_idx].phys_addr,
				&smem_areas_tmp[smem_idx].size);
			ret = -ENOMEM;
			goto free_smem_areas;
		}

		if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
				(uintptr_t)smem_areas_tmp[smem_idx].virt_addr,
				smem_areas_tmp[smem_idx].size)) {
			LOG_ERR(
				"%s: invalid virtual address block %i: %p:%pa\n",
				__func__, smem_idx,
				smem_areas_tmp[smem_idx].virt_addr,
				&smem_areas_tmp[smem_idx].size);
			++smem_idx;
			ret = -EINVAL;
			goto free_smem_areas;
		}

		++smem_idx;
		if (smem_idx > 999) {
			LOG_ERR("%s: max num aux mem regions reached\n",
				__func__);
			break;
		}
	}

	smem_areas = smem_areas_tmp;
	smem_ramdump_segments = ramdump_segments_tmp;

	key = "qcom,mpu-enabled";
	security_enabled = of_property_read_bool(pdev->dev.of_node, key);
	if (security_enabled) {
		SMEM_INFO("smem security enabled\n");
		smem_init_security();
	}

	probe_done = true;

	ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
	if (ret)
		LOG_ERR("%s: of_platform_populate failed %d\n", __func__, ret);

	return 0;

free_smem_areas:
	for (smem_idx = smem_idx - 1; smem_idx >= 1; --smem_idx)
		iounmap(smem_areas_tmp[smem_idx].virt_addr);

	num_smem_areas = 0;
	kfree(ramdump_segments_tmp);
	kfree(smem_areas_tmp);
	return ret;
}

static const struct of_device_id msm_smem_match_table[] = {
	{ .compatible = "qcom,smem" },
	{},
};

static struct platform_driver msm_smem_driver = {
	.probe = msm_smem_probe,
	.driver = {
		.name = "msm_smem",
		.owner = THIS_MODULE,
		.of_match_table = msm_smem_match_table,
	},
};

int __init msm_smem_init(void)
{
	static bool registered;
	int rc;

	if (registered)
		return 0;

	registered = true;
	smem_max_items = SMEM_NUM_ITEMS;
	smem_ipc_log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "smem", 0);
	if (!smem_ipc_log_ctx) {
		pr_err("%s: unable to create logging context\n", __func__);
		msm_smem_debug_mask = 0;
	}

	rc = platform_driver_register(&msm_smem_driver);
	if (rc) {
		LOG_ERR("%s: msm_smem_driver register failed %d\n",
			__func__, rc);
		return rc;
	}

	smem_module_init_notify(0, NULL);

	return 0;
}

arch_initcall(msm_smem_init);