/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/export.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ipc_logging.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <soc/qcom/subsystem_notif.h>
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/ramdump.h>

#include <soc/qcom/smem.h>

#include "smem_private.h"

#define MODEM_SBL_VERSION_INDEX 7
#define SMEM_VERSION_INFO_SIZE (32 * 4)
#define SMEM_VERSION 0x000B

enum {
	MSM_SMEM_DEBUG = 1U << 0,
	MSM_SMEM_INFO = 1U << 1,
};

static int msm_smem_debug_mask = MSM_SMEM_INFO;
module_param_named(debug_mask, msm_smem_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
static void *smem_ipc_log_ctx;
#define NUM_LOG_PAGES 4

#define IPC_LOG(x...) do { \
		if (smem_ipc_log_ctx) \
			ipc_log_string(smem_ipc_log_ctx, x); \
	} while (0)

#define LOG_ERR(x...) do { \
		pr_err(x); \
		IPC_LOG(x); \
	} while (0)
#define SMEM_DBG(x...) do { \
		if (msm_smem_debug_mask & MSM_SMEM_DEBUG) \
			IPC_LOG(x); \
	} while (0)
#define SMEM_INFO(x...) do { \
		if (msm_smem_debug_mask & MSM_SMEM_INFO) \
			IPC_LOG(x); \
	} while (0)
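
/*
 * Example (illustrative, not part of the driver): the logging level can be
 * changed at runtime through the module parameter created above. Assuming
 * the object is built as "msm_smem", something like:
 *
 *	echo 3 > /sys/module/msm_smem/parameters/debug_mask
 *
 * would enable both MSM_SMEM_DEBUG and MSM_SMEM_INFO messages in the IPC log.
 */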

#define SMEM_SPINLOCK_SMEM_ALLOC "S:3"

static void *smem_ram_base;
static resource_size_t smem_ram_size;
static phys_addr_t smem_ram_phys;
static remote_spinlock_t remote_spinlock;
static uint32_t num_smem_areas;
static struct smem_area *smem_areas;
static struct ramdump_segment *smem_ramdump_segments;
static int spinlocks_initialized;
static void *smem_ramdump_dev;
static DEFINE_MUTEX(spinlock_init_lock);
static DEFINE_SPINLOCK(smem_init_check_lock);
static struct device *smem_dev;
static int smem_module_inited;
static RAW_NOTIFIER_HEAD(smem_module_init_notifier_list);
static DEFINE_MUTEX(smem_module_init_notifier_lock);
static bool probe_done;
uint32_t smem_max_items;

/* smem security feature components */
#define SMEM_TOC_IDENTIFIER 0x434f5424 /* "$TOC" */
#define SMEM_TOC_MAX_EXCLUSIONS 4
#define SMEM_PART_HDR_IDENTIFIER 0x54525024 /* "$PRT" */
#define SMEM_ALLOCATION_CANARY 0xa5a5
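
/*
 * A quick check of the identifier encoding (illustrative): the bytes of
 * "$TOC" are '$' = 0x24, 'T' = 0x54, 'O' = 0x4f, 'C' = 0x43. Stored
 * little-endian with '$' in the lowest byte, they read back as the 32-bit
 * value 0x434f5424, which is the constant above. "$PRT" ('$' 0x24, 'P' 0x50,
 * 'R' 0x52, 'T' 0x54) likewise yields 0x54525024.
 */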

struct smem_toc_entry {
	uint32_t offset;
	uint32_t size;
	uint32_t flags;
	uint16_t host0;
	uint16_t host1;
	uint32_t size_cacheline;
	uint32_t reserved[3];
	uint32_t exclusion_sizes[SMEM_TOC_MAX_EXCLUSIONS];
};

struct smem_toc {
	/* Identifier is a constant, set to SMEM_TOC_IDENTIFIER. */
	uint32_t identifier;
	uint32_t version;
	uint32_t num_entries;
	uint32_t reserved[5];
	struct smem_toc_entry entry[];
};

struct smem_partition_header {
	/* Identifier is a constant, set to SMEM_PART_HDR_IDENTIFIER. */
	uint32_t identifier;
	uint16_t host0;
	uint16_t host1;
	uint32_t size;
	uint32_t offset_free_uncached;
	uint32_t offset_free_cached;
	uint32_t reserved[3];
};

struct smem_partition_allocation_header {
	/* Canary is a constant, set to SMEM_ALLOCATION_CANARY. */
	uint16_t canary;
	uint16_t smem_type;
	uint32_t size; /* includes padding bytes */
	uint16_t padding_data;
	uint16_t padding_hdr;
	uint32_t reserved[1];
};

struct smem_partition_info {
	uint32_t partition_num;
	uint32_t offset;
	uint32_t size_cacheline;
};

static struct smem_partition_info partitions[NUM_SMEM_SUBSYSTEMS];

#define SMEM_COMM_PART_VERSION 0x000C
#define SMEM_COMM_HOST 0xFFFE
static bool use_comm_partition;
static struct smem_partition_info comm_partition;
/* end smem security feature components */

/* Identifier for the SMEM target info struct. */
#define SMEM_TARG_INFO_IDENTIFIER 0x49494953 /* "SIII" in little-endian. */

struct smem_targ_info_type {
	/* Identifier is a constant, set to SMEM_TARG_INFO_IDENTIFIER. */
	uint32_t identifier;
	uint32_t size;
	phys_addr_t phys_base_addr;
	uint32_t max_items;
};

struct restart_notifier_block {
	unsigned int processor;
	char *name;
	struct notifier_block nb;
};

static int restart_notifier_cb(struct notifier_block *this,
			       unsigned long code,
			       void *data);

static struct restart_notifier_block restart_notifiers[] = {
	{SMEM_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
	{SMEM_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
	{SMEM_WCNSS, "wcnss", .nb.notifier_call = restart_notifier_cb},
	{SMEM_DSPS, "dsps", .nb.notifier_call = restart_notifier_cb},
	{SMEM_MODEM, "gss", .nb.notifier_call = restart_notifier_cb},
	{SMEM_Q6, "adsp", .nb.notifier_call = restart_notifier_cb},
	{SMEM_DSPS, "slpi", .nb.notifier_call = restart_notifier_cb},
};

static int init_smem_remote_spinlock(void);
/**
 * is_probe_done() - Check whether the probe function completed successfully
 *
 * @return: true if probe completed successfully, false otherwise
 *
 * Helper function for EPROBE_DEFER support. If this function returns false,
 * the calling function should immediately return -EPROBE_DEFER.
 */
static bool is_probe_done(void)
{
	return probe_done;
}

/**
 * smem_phys_to_virt() - Convert a physical base and offset to virtual address
 *
 * @base: physical base address to check
 * @offset: offset from the base to get the final address
 * @returns: virtual SMEM address; NULL for failure
 *
 * Takes a physical address and an offset and checks if the resulting physical
 * address would fit into one of the smem regions. If so, returns the
 * corresponding virtual address. Otherwise returns NULL.
 */
static void *smem_phys_to_virt(phys_addr_t base, unsigned int offset)
{
	int i;
	phys_addr_t phys_addr;
	resource_size_t size;

	if (OVERFLOW_ADD_UNSIGNED(phys_addr_t, base, offset))
		return NULL;

	if (!smem_areas) {
		/*
		 * Early boot - no area configuration yet, so default
		 * to using the main memory region.
		 *
		 * To remove the MSM_SHARED_RAM_BASE and the static
		 * mapping of SMEM in the future, add dump_stack()
		 * to identify the early callers of smem_get_entry()
		 * (which calls this function) and replace those calls
		 * with a new function that knows how to lookup the
		 * SMEM base address before SMEM has been probed.
		 */
		phys_addr = smem_ram_phys;
		size = smem_ram_size;

		if (base >= phys_addr && base + offset < phys_addr + size) {
			if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
						  (uintptr_t)smem_ram_base,
						  offset)) {
				SMEM_INFO("%s: overflow %p %x\n", __func__,
					  smem_ram_base, offset);
				return NULL;
			}

			return smem_ram_base + offset;
		} else {
			return NULL;
		}
	}
	for (i = 0; i < num_smem_areas; ++i) {
		phys_addr = smem_areas[i].phys_addr;
		size = smem_areas[i].size;

		if (base < phys_addr || base + offset >= phys_addr + size)
			continue;

		if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
					  (uintptr_t)smem_areas[i].virt_addr,
					  offset)) {
			SMEM_INFO("%s: overflow %p %x\n", __func__,
				  smem_areas[i].virt_addr, offset);
			return NULL;
		}

		return smem_areas[i].virt_addr + offset;
	}

	return NULL;
}

/**
 * smem_virt_to_phys() - Convert SMEM address to physical address.
 *
 * @smem_address: Address of SMEM item (returned by smem_alloc(), etc.)
 * @returns: Physical address (or NULL if there is a failure)
 *
 * This function should only be used if an SMEM item needs to be handed
 * off to a DMA engine. This function will not return a version of
 * EPROBE_DEFER if the driver is not ready, since the caller should obtain
 * @smem_address from one of the other public APIs and get EPROBE_DEFER at
 * that time, if applicable.
 */
phys_addr_t smem_virt_to_phys(void *smem_address)
{
	phys_addr_t phys_addr = 0;
	int i;
	void *vend;

	if (!smem_areas)
		return phys_addr;

	for (i = 0; i < num_smem_areas; ++i) {
		vend = (void *)(smem_areas[i].virt_addr + smem_areas[i].size);

		if (smem_address >= smem_areas[i].virt_addr &&
		    smem_address < vend) {
			phys_addr = smem_address - smem_areas[i].virt_addr;
			phys_addr += smem_areas[i].phys_addr;
			break;
		}
	}

	return phys_addr;
}
EXPORT_SYMBOL(smem_virt_to_phys);
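
/*
 * Example (illustrative sketch, not part of the driver): a client that
 * needs to hand an SMEM item to a DMA engine might do something like the
 * following. SMEM_ID_EXAMPLE and program_dma_engine() are hypothetical.
 *
 *	unsigned int size;
 *	void *vaddr = smem_get_entry(SMEM_ID_EXAMPLE, &size, SMEM_MODEM, 0);
 *	phys_addr_t paddr;
 *
 *	if (!IS_ERR_OR_NULL(vaddr)) {
 *		paddr = smem_virt_to_phys(vaddr);
 *		if (paddr)
 *			program_dma_engine(paddr, size);
 *	}
 */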

/**
 * __smem_get_entry_nonsecure - Get pointer and size of existing SMEM item
 *
 * @id: ID of SMEM item
 * @size: Pointer to size variable for storing the result
 * @skip_init_check: True means do not verify that SMEM has been initialized
 * @use_rspinlock: True to use the remote spinlock
 * @returns: Pointer to SMEM item or NULL if it doesn't exist
 */
static void *__smem_get_entry_nonsecure(unsigned int id, unsigned int *size,
					bool skip_init_check,
					bool use_rspinlock)
{
	struct smem_shared *shared = smem_ram_base;
	struct smem_heap_entry *toc = shared->heap_toc;
	int use_spinlocks = spinlocks_initialized && use_rspinlock;
	void *ret = NULL;
	unsigned long flags = 0;
	int rc;

	if (!skip_init_check && !smem_initialized_check())
		return ret;

	if (id >= smem_max_items)
		return ret;

	if (use_spinlocks) {
		do {
			rc = remote_spin_trylock_irqsave(&remote_spinlock,
							 flags);
		} while (!rc);
	}
	/* toc is in device memory and cannot be speculatively accessed */
	if (toc[id].allocated) {
		phys_addr_t phys_base;

		*size = toc[id].size;
		barrier();

		phys_base = toc[id].reserved & BASE_ADDR_MASK;
		if (!phys_base)
			phys_base = smem_ram_phys;
		ret = smem_phys_to_virt(phys_base, toc[id].offset);
	} else {
		*size = 0;
	}
	if (use_spinlocks)
		remote_spin_unlock_irqrestore(&remote_spinlock, flags);

	return ret;
}

/**
 * __smem_get_entry_secure - Get pointer and size of existing SMEM item with
 *				security support
 *
 * @id: ID of SMEM item
 * @size: Pointer to size variable for storing the result
 * @to_proc: SMEM host that shares the item with apps
 * @flags: Item attribute flags
 * @skip_init_check: True means do not verify that SMEM has been initialized
 * @use_rspinlock: True to use the remote spinlock
 * @returns: Pointer to SMEM item or NULL if it doesn't exist
 */
static void *__smem_get_entry_secure(unsigned int id,
				     unsigned int *size,
				     unsigned int to_proc,
				     unsigned int flags,
				     bool skip_init_check,
				     bool use_rspinlock)
{
	struct smem_partition_header *hdr;
	unsigned long lflags = 0;
	void *item = NULL;
	struct smem_partition_allocation_header *alloc_hdr;
	uint32_t partition_num;
	uint32_t a_hdr_size;
	int rc;

	SMEM_DBG("%s(%u, %u, %u, %d, %d)\n", __func__, id, to_proc,
		 flags, skip_init_check, use_rspinlock);

	if (!skip_init_check && !smem_initialized_check())
		return NULL;

	if (id >= smem_max_items) {
		SMEM_INFO("%s: invalid id %d\n", __func__, id);
		return NULL;
	}

	if (!(flags & SMEM_ANY_HOST_FLAG) && to_proc >= NUM_SMEM_SUBSYSTEMS) {
		SMEM_INFO("%s: id %u invalid to_proc %d\n", __func__, id,
			  to_proc);
		return NULL;
	}

	if (flags & SMEM_ANY_HOST_FLAG || !partitions[to_proc].offset) {
		if (use_comm_partition) {
			partition_num = comm_partition.partition_num;
			hdr = smem_areas[0].virt_addr + comm_partition.offset;
		} else {
			return __smem_get_entry_nonsecure(id, size,
							  skip_init_check,
							  use_rspinlock);
		}
	} else {
		partition_num = partitions[to_proc].partition_num;
		hdr = smem_areas[0].virt_addr + partitions[to_proc].offset;
	}
	if (unlikely(!spinlocks_initialized)) {
		rc = init_smem_remote_spinlock();
		if (unlikely(rc)) {
			SMEM_INFO("%s: id:%u remote spinlock init failed %d\n",
				  __func__, id, rc);
			return NULL;
		}
	}
	if (use_rspinlock) {
		do {
			rc = remote_spin_trylock_irqsave(&remote_spinlock,
							 lflags);
		} while (!rc);
	}
	if (hdr->identifier != SMEM_PART_HDR_IDENTIFIER) {
		LOG_ERR(
			"%s: SMEM corruption detected. Partition %d to %d at %p\n",
			__func__, partition_num, to_proc, hdr);
		BUG();
	}

	if (flags & SMEM_ITEM_CACHED_FLAG) {
		a_hdr_size = ALIGN(sizeof(*alloc_hdr),
				   partitions[to_proc].size_cacheline);
		for (alloc_hdr = (void *)(hdr) + hdr->size - a_hdr_size;
		     (void *)(alloc_hdr) > (void *)(hdr) +
				hdr->offset_free_cached;
		     alloc_hdr = (void *)(alloc_hdr) -
				alloc_hdr->size - a_hdr_size) {
			if (alloc_hdr->canary != SMEM_ALLOCATION_CANARY) {
				LOG_ERR(
					"%s: SMEM corruption detected. Partition %d to %d at %p\n",
					__func__, partition_num, to_proc,
					alloc_hdr);
				BUG();
			}
			if (alloc_hdr->smem_type == id) {
				/* 8 byte alignment to match legacy */
				*size = ALIGN(alloc_hdr->size -
						alloc_hdr->padding_data, 8);
				item = (void *)(alloc_hdr) - alloc_hdr->size;
				break;
			}
		}
	} else {
		for (alloc_hdr = (void *)(hdr) + sizeof(*hdr);
		     (void *)(alloc_hdr) < (void *)(hdr) +
				hdr->offset_free_uncached;
		     alloc_hdr = (void *)(alloc_hdr) +
				sizeof(*alloc_hdr) +
				alloc_hdr->padding_hdr +
				alloc_hdr->size) {
			if (alloc_hdr->canary != SMEM_ALLOCATION_CANARY) {
				LOG_ERR(
					"%s: SMEM corruption detected. Partition %d to %d at %p\n",
					__func__, partition_num, to_proc,
					alloc_hdr);
				BUG();
			}
			if (alloc_hdr->smem_type == id) {
				/* 8 byte alignment to match legacy */
				*size = ALIGN(alloc_hdr->size -
						alloc_hdr->padding_data, 8);
				item = (void *)(alloc_hdr) +
						sizeof(*alloc_hdr) +
						alloc_hdr->padding_hdr;
				break;
			}
		}
	}
	if (use_rspinlock)
		remote_spin_unlock_irqrestore(&remote_spinlock, lflags);

	return item;
}

static void *__smem_find(unsigned int id, unsigned int size_in,
			 bool skip_init_check)
{
	unsigned int size;
	void *ptr;

	ptr = __smem_get_entry_nonsecure(id, &size, skip_init_check, true);
	if (!ptr)
		return NULL;

	size_in = ALIGN(size_in, 8);
	if (size_in != size) {
		SMEM_INFO("smem_find(%u, %u): wrong size %u\n",
			  id, size_in, size);
		return NULL;
	}

	return ptr;
}

/**
 * smem_find - Find an existing item with security support
 *
 * @id: ID of SMEM item
 * @size_in: Size of the SMEM item
 * @to_proc: SMEM host that shares the item with apps
 * @flags: Item attribute flags
 * @returns: Pointer to SMEM item, NULL if it doesn't exist, or -EPROBE_DEFER
 *	if the driver is not ready
 */
void *smem_find(unsigned int id, unsigned int size_in, unsigned int to_proc,
		unsigned int flags)
{
	unsigned int size;
	void *ptr;

	SMEM_DBG("%s(%u, %u, %u, %u)\n", __func__, id, size_in, to_proc,
		 flags);

	/*
	 * Handle the circular dependency between SMEM and software-implemented
	 * remote spinlocks. SMEM must initialize the remote spinlocks in
	 * probe() before probe() completes. EPROBE_DEFER handling will not
	 * resolve this code path, so we must treat the spinlock item as a
	 * special case.
	 */
	if (!is_probe_done() && id != SMEM_SPINLOCK_ARRAY)
		return ERR_PTR(-EPROBE_DEFER);

	ptr = smem_get_entry(id, &size, to_proc, flags);
	if (!ptr)
		return NULL;

	size_in = ALIGN(size_in, 8);
	if (size_in != size) {
		SMEM_INFO("smem_find(%u, %u, %u, %u): wrong size %u\n",
			  id, size_in, to_proc, flags, size);
		return NULL;
	}

	return ptr;
}
EXPORT_SYMBOL(smem_find);
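
/*
 * Example (illustrative sketch): callers must distinguish all three return
 * classes of smem_find(). SMEM_ID_EXAMPLE and struct example_item are
 * hypothetical.
 *
 *	struct example_item *item;
 *
 *	item = smem_find(SMEM_ID_EXAMPLE, sizeof(*item),
 *			 SMEM_MODEM, SMEM_ANY_HOST_FLAG);
 *	if (IS_ERR(item))
 *		return PTR_ERR(item);	// -EPROBE_DEFER: retry probe later
 *	if (!item)
 *		return -ENODEV;		// item missing or size mismatch
 *	// item is valid here
 */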

/**
 * alloc_item_nonsecure - Allocate an SMEM item in the nonsecure partition
 *
 * @id: ID of SMEM item
 * @size_in: Size to allocate
 * @returns: Pointer to SMEM item or NULL for error
 *
 * Assumes the id parameter is valid and does not already exist. Assumes
 * size_in is already adjusted for alignment, if necessary. Requires the
 * remote spinlock to already be locked.
 */
static void *alloc_item_nonsecure(unsigned int id, unsigned int size_in)
{
	void *smem_base = smem_ram_base;
	struct smem_shared *shared = smem_base;
	struct smem_heap_entry *toc = shared->heap_toc;
	void *ret = NULL;

	if (shared->heap_info.heap_remaining >= size_in) {
		toc[id].offset = shared->heap_info.free_offset;
		toc[id].size = size_in;
		/*
		 * wmb() is necessary to ensure the allocation data is
		 * consistent before setting the allocated flag to prevent
		 * race conditions with remote processors
		 */
		wmb();
		toc[id].allocated = 1;

		shared->heap_info.free_offset += size_in;
		shared->heap_info.heap_remaining -= size_in;
		ret = smem_base + toc[id].offset;
		/*
		 * wmb() is necessary to ensure the heap data is consistent
		 * before continuing to prevent race conditions with remote
		 * processors
		 */
		wmb();
	} else {
		SMEM_INFO("%s: id %u not enough memory %u (required %u)\n",
			  __func__, id, shared->heap_info.heap_remaining,
			  size_in);
	}

	return ret;
}

/**
 * alloc_item_secure - Allocate an SMEM item in a secure partition
 *
 * @id: ID of SMEM item
 * @size_in: Size to allocate
 * @to_proc: SMEM host that shares the item with apps
 * @flags: Item attribute flags
 * @returns: Pointer to SMEM item or NULL for error
 *
 * Assumes the id parameter is valid and does not already exist. Assumes
 * size_in is the raw size requested by the client. Assumes to_proc is a
 * valid host, and a valid partition to that host exists. Requires the
 * remote spinlock to already be locked.
 */
static void *alloc_item_secure(unsigned int id, unsigned int size_in,
			       unsigned int to_proc, unsigned int flags)
{
	void *smem_base = smem_ram_base;
	struct smem_partition_header *hdr;
	struct smem_partition_allocation_header *alloc_hdr;
	uint32_t a_hdr_size;
	uint32_t a_data_size;
	uint32_t size_cacheline;
	uint32_t free_space;
	uint32_t partition_num;
	void *ret = NULL;

	if (to_proc == SMEM_COMM_HOST) {
		hdr = smem_base + comm_partition.offset;
		partition_num = comm_partition.partition_num;
		size_cacheline = comm_partition.size_cacheline;
	} else if (to_proc < NUM_SMEM_SUBSYSTEMS) {
		hdr = smem_base + partitions[to_proc].offset;
		partition_num = partitions[to_proc].partition_num;
		size_cacheline = partitions[to_proc].size_cacheline;
	} else {
		SMEM_INFO("%s: invalid to_proc %u for id %u\n", __func__,
			  to_proc, id);
		return NULL;
	}

	if (hdr->identifier != SMEM_PART_HDR_IDENTIFIER) {
		LOG_ERR(
			"%s: SMEM corruption detected. Partition %d to %d at %p\n",
			__func__, partition_num, to_proc, hdr);
		BUG();
	}

	free_space = hdr->offset_free_cached -
			hdr->offset_free_uncached;

	if (flags & SMEM_ITEM_CACHED_FLAG) {
		a_hdr_size = ALIGN(sizeof(*alloc_hdr), size_cacheline);
		a_data_size = ALIGN(size_in, size_cacheline);
		if (free_space < a_hdr_size + a_data_size) {
			SMEM_INFO(
				"%s: id %u not enough memory %u (required %u)\n",
				__func__, id, free_space,
				a_hdr_size + a_data_size);
			return ret;
		}
		alloc_hdr = (void *)(hdr) + hdr->offset_free_cached -
				a_hdr_size;
		alloc_hdr->canary = SMEM_ALLOCATION_CANARY;
		alloc_hdr->smem_type = id;
		alloc_hdr->size = a_data_size;
		alloc_hdr->padding_data = a_data_size - size_in;
		alloc_hdr->padding_hdr = a_hdr_size - sizeof(*alloc_hdr);
		hdr->offset_free_cached = hdr->offset_free_cached -
				a_hdr_size - a_data_size;
		ret = (void *)(alloc_hdr) - a_data_size;
		/*
		 * The SMEM protocol currently does not support cacheable
		 * areas within the smem region, but if it ever does in the
		 * future, then cache management needs to be done here.
		 * The area of memory this item is allocated from will need
		 * to be dynamically made cacheable, and a cache flush of the
		 * allocation header using __cpuc_flush_dcache_area and
		 * outer_flush_area will need to be done.
		 */
	} else {
		a_hdr_size = sizeof(*alloc_hdr);
		a_data_size = ALIGN(size_in, 8);
		if (free_space < a_hdr_size + a_data_size) {
			SMEM_INFO(
				"%s: id %u not enough memory %u (required %u)\n",
				__func__, id, free_space,
				a_hdr_size + a_data_size);
			return ret;
		}
		alloc_hdr = (void *)(hdr) + hdr->offset_free_uncached;
		alloc_hdr->canary = SMEM_ALLOCATION_CANARY;
		alloc_hdr->smem_type = id;
		alloc_hdr->size = a_data_size;
		alloc_hdr->padding_data = a_data_size - size_in;
		alloc_hdr->padding_hdr = a_hdr_size - sizeof(*alloc_hdr);
		hdr->offset_free_uncached = hdr->offset_free_uncached +
				a_hdr_size + a_data_size;
		ret = alloc_hdr + 1;
	}
	/*
	 * wmb() is necessary to ensure the heap and allocation data is
	 * consistent before continuing to prevent race conditions with
	 * remote processors
	 */
	wmb();

	return ret;
}
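
/*
 * Illustrative view of a secured partition, derived from the allocators
 * above: uncached items grow upward from just after the partition header,
 * each preceded by its allocation header; cached items grow downward from
 * the end of the partition, each followed by its allocation header. The
 * free region lies between offset_free_uncached and offset_free_cached.
 *
 *	+--------------------------+  offset 0
 *	| smem_partition_header    |
 *	+--------------------------+
 *	| alloc hdr | item data    |  uncached allocations (grow up)
 *	+--------------------------+  <- offset_free_uncached
 *	|        free space        |
 *	+--------------------------+  <- offset_free_cached
 *	| item data | alloc hdr    |  cached allocations (grow down)
 *	+--------------------------+  hdr->size
 */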

/**
 * smem_alloc - Find an existing item, otherwise allocate it with security
 *		support
 *
 * @id: ID of SMEM item
 * @size_in: Size of the SMEM item
 * @to_proc: SMEM host that shares the item with apps
 * @flags: Item attribute flags
 * @returns: Pointer to SMEM item, NULL if it couldn't be found/allocated,
 *	or -EPROBE_DEFER if the driver is not ready
 */
void *smem_alloc(unsigned int id, unsigned int size_in, unsigned int to_proc,
		 unsigned int flags)
{
	unsigned long lflags;
	void *ret = NULL;
	int rc;
	unsigned int size_out;
	unsigned int a_size_in;

	SMEM_DBG("%s(%u, %u, %u, %u)\n", __func__, id, size_in, to_proc,
		 flags);

	if (!is_probe_done())
		return ERR_PTR(-EPROBE_DEFER);

	if (!smem_initialized_check())
		return NULL;

	if (id >= smem_max_items) {
		SMEM_INFO("%s: invalid id %u\n", __func__, id);
		return NULL;
	}

	if (!(flags & SMEM_ANY_HOST_FLAG) && to_proc >= NUM_SMEM_SUBSYSTEMS) {
		SMEM_INFO("%s: invalid to_proc %u for id %u\n", __func__,
			  to_proc, id);
		return NULL;
	}

	if (unlikely(!spinlocks_initialized)) {
		rc = init_smem_remote_spinlock();
		if (unlikely(rc)) {
			SMEM_INFO("%s: id:%u remote spinlock init failed %d\n",
				  __func__, id, rc);
			return NULL;
		}
	}

	a_size_in = ALIGN(size_in, 8);
	do {
		rc = remote_spin_trylock_irqsave(&remote_spinlock, lflags);
	} while (!rc);

	ret = __smem_get_entry_secure(id, &size_out, to_proc, flags, true,
				      false);
	if (ret) {
		SMEM_INFO("%s: %u already allocated\n", __func__, id);
		if (a_size_in == size_out) {
			remote_spin_unlock_irqrestore(&remote_spinlock,
						      lflags);
			return ret;
		}
		remote_spin_unlock_irqrestore(&remote_spinlock, lflags);
		SMEM_INFO("%s: id %u wrong size %u (expected %u)\n",
			  __func__, id, size_out, a_size_in);
		return NULL;
	}

	if (id > SMEM_FIXED_ITEM_LAST) {
		SMEM_INFO("%s: allocating %u size %u to_proc %u flags %u\n",
			  __func__, id, size_in, to_proc, flags);
		if (flags & SMEM_ANY_HOST_FLAG ||
		    !partitions[to_proc].offset) {
			if (use_comm_partition)
				ret = alloc_item_secure(id, size_in,
							SMEM_COMM_HOST, flags);
			else
				ret = alloc_item_nonsecure(id, a_size_in);
		} else {
			ret = alloc_item_secure(id, size_in, to_proc, flags);
		}
	} else {
		SMEM_INFO("%s: attempted to allocate non-dynamic item %u\n",
			  __func__, id);
	}

	remote_spin_unlock_irqrestore(&remote_spinlock, lflags);
	return ret;
}
EXPORT_SYMBOL(smem_alloc);
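
/*
 * Example (illustrative sketch): allocating, or re-finding, an item shared
 * with a remote host. SMEM_ID_EXAMPLE and struct example_item are
 * hypothetical; real clients use IDs from the fixed SMEM item table.
 *
 *	struct example_item *item;
 *
 *	item = smem_alloc(SMEM_ID_EXAMPLE, sizeof(*item), SMEM_Q6, 0);
 *	if (IS_ERR(item))
 *		return PTR_ERR(item);	// driver not probed yet
 *	if (!item)
 *		return -ENOMEM;		// allocation failed or size mismatch
 *	// item points into shared memory; initialize it carefully
 */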

/**
 * smem_get_entry - Get an existing item with security support
 *
 * @id: ID of SMEM item
 * @size: Pointer to size variable for storing the result
 * @to_proc: SMEM host that shares the item with apps
 * @flags: Item attribute flags
 * @returns: Pointer to SMEM item, NULL if it doesn't exist, or -EPROBE_DEFER
 *	if the driver isn't ready
 */
void *smem_get_entry(unsigned int id, unsigned int *size, unsigned int to_proc,
		     unsigned int flags)
{
	SMEM_DBG("%s(%u, %u, %u)\n", __func__, id, to_proc, flags);

	/*
	 * Handle the circular dependency between SMEM and software-implemented
	 * remote spinlocks. SMEM must initialize the remote spinlocks in
	 * probe() before probe() completes. EPROBE_DEFER handling will not
	 * resolve this code path, so we must treat the spinlock item as a
	 * special case.
	 */
	if (!is_probe_done() && id != SMEM_SPINLOCK_ARRAY)
		return ERR_PTR(-EPROBE_DEFER);

	return __smem_get_entry_secure(id, size, to_proc, flags, false, true);
}
EXPORT_SYMBOL(smem_get_entry);

/**
 * smem_get_entry_no_rlock - Get an existing item without using the remote
 *				spinlock
 *
 * @id: ID of SMEM item
 * @size_out: Pointer to size variable for storing the result
 * @to_proc: SMEM host that shares the item with apps
 * @flags: Item attribute flags
 * @returns: Pointer to SMEM item, NULL if it doesn't exist, or -EPROBE_DEFER
 *	if the driver isn't ready
 *
 * This function does not lock the remote spinlock and should only be used in
 * failure-recovery cases such as retrieving the subsystem failure reason
 * during subsystem restart.
 */
void *smem_get_entry_no_rlock(unsigned int id, unsigned int *size_out,
			      unsigned int to_proc, unsigned int flags)
{
	if (!is_probe_done())
		return ERR_PTR(-EPROBE_DEFER);

	return __smem_get_entry_secure(id, size_out, to_proc, flags, false,
				       false);
}
EXPORT_SYMBOL(smem_get_entry_no_rlock);

/**
 * smem_get_remote_spinlock - Remote spinlock pointer for unit testing.
 *
 * @returns: pointer to SMEM remote spinlock
 */
remote_spinlock_t *smem_get_remote_spinlock(void)
{
	if (unlikely(!spinlocks_initialized))
		init_smem_remote_spinlock();
	return &remote_spinlock;
}
EXPORT_SYMBOL(smem_get_remote_spinlock);

/**
 * smem_get_free_space() - Get the available allocation free space for a
 *				partition
 *
 * @to_proc: remote SMEM host. Determines the applicable partition
 * @returns: size in bytes available to allocate
 *
 * Helper function for SMD so that SMD only scans the channel allocation
 * table for a partition when it is reasonably certain that a channel has
 * actually been created, because scanning can be expensive. Creating a
 * channel will consume some of the free space in a partition, so SMD can
 * compare the last free space size against the current free space size to
 * determine if a channel may have been created. SMD can't do this directly,
 * because the necessary partition internals are restricted to just SMEM.
 */
unsigned int smem_get_free_space(unsigned int to_proc)
{
	struct smem_partition_header *hdr;
	struct smem_shared *shared;

	if (to_proc >= NUM_SMEM_SUBSYSTEMS) {
		pr_err("%s: invalid to_proc:%d\n", __func__, to_proc);
		return UINT_MAX;
	}

	if (partitions[to_proc].offset) {
		if (unlikely(OVERFLOW_ADD_UNSIGNED(uintptr_t,
					(uintptr_t)smem_areas[0].virt_addr,
					partitions[to_proc].offset))) {
			pr_err("%s: unexpected overflow detected\n", __func__);
			return UINT_MAX;
		}
		hdr = smem_areas[0].virt_addr + partitions[to_proc].offset;
		return hdr->offset_free_cached - hdr->offset_free_uncached;
	}
	shared = smem_ram_base;
	return shared->heap_info.heap_remaining;
}
EXPORT_SYMBOL(smem_get_free_space);
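
/*
 * Example (illustrative sketch of the SMD pattern described above): cache
 * the last observed free space and rescan only when it shrinks. last_free
 * and scan_channel_alloc_table() are hypothetical and owned by the caller.
 *
 *	unsigned int free_now = smem_get_free_space(SMEM_MODEM);
 *
 *	if (free_now != UINT_MAX && free_now < last_free) {
 *		scan_channel_alloc_table();
 *		last_free = free_now;
 *	}
 */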

/**
 * smem_get_version() - Get the smem user version number
 *
 * @idx: SMEM user index in the SMEM_VERSION_INFO table.
 * @returns: smem version number on success, otherwise zero.
 */
unsigned int smem_get_version(unsigned int idx)
{
	int *version_array;
	struct smem_shared *smem = smem_ram_base;

	if (idx >= 32) {
		pr_err("%s: invalid idx:%d\n", __func__, idx);
		return 0;
	}

	if (use_comm_partition)
		version_array = smem->version;
	else
		version_array = __smem_find(SMEM_VERSION_INFO,
					    SMEM_VERSION_INFO_SIZE, true);
	if (version_array == NULL)
		return 0;

	return version_array[idx];
}
EXPORT_SYMBOL(smem_get_version);
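
/*
 * Illustrative note (derived from smem_initialized_check() below): the SBL
 * version word places the major version in the upper 16 bits, so the check
 * there compares the whole word against SMEM_VERSION << 16. A client-side
 * sketch:
 *
 *	unsigned int ver = smem_get_version(MODEM_SBL_VERSION_INDEX);
 *
 *	if (ver == SMEM_VERSION << 16)
 *		;	// SMEM was pre-initialized by the expected SBL
 */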

/**
 * init_smem_remote_spinlock - Reentrant remote spinlock initialization
 *
 * @returns: success or error code for failure
 */
static int init_smem_remote_spinlock(void)
{
	int rc = 0;

	/*
	 * Optimistic locking. Init only needs to be done once by the first
	 * caller. After that, serializing inits between different callers
	 * is unnecessary. The second check after the lock ensures init
	 * wasn't previously completed by someone else before the lock could
	 * be grabbed.
	 */
	if (!spinlocks_initialized) {
		mutex_lock(&spinlock_init_lock);
		if (!spinlocks_initialized) {
			rc = remote_spin_lock_init(&remote_spinlock,
						   SMEM_SPINLOCK_SMEM_ALLOC);
			if (!rc)
				spinlocks_initialized = 1;
		}
		mutex_unlock(&spinlock_init_lock);
	}
	return rc;
}

/**
 * smem_initialized_check - Reentrant check that smem has been initialized
 *
 * @returns: true if initialized, false if not.
 */
bool smem_initialized_check(void)
{
	static int checked;
	static int is_inited;
	unsigned long flags;
	struct smem_shared *smem;
	unsigned int ver;

	if (likely(checked)) {
		if (unlikely(!is_inited))
			LOG_ERR("%s: smem not initialized\n", __func__);
		return is_inited;
	}

	spin_lock_irqsave(&smem_init_check_lock, flags);
	if (checked) {
		spin_unlock_irqrestore(&smem_init_check_lock, flags);
		if (unlikely(!is_inited))
			LOG_ERR("%s: smem not initialized\n", __func__);
		return is_inited;
	}

	smem = smem_ram_base;

	if (smem->heap_info.initialized != 1)
		goto failed;
	if (smem->heap_info.reserved != 0)
		goto failed;

	/*
	 * The Modem SBL is now the Master SBL version and is required to
	 * pre-initialize SMEM and fill in any necessary configuration
	 * structures. Without the extra configuration data, the SMEM driver
	 * cannot be properly initialized.
	 */
	ver = smem->version[MODEM_SBL_VERSION_INDEX];
	if (ver == SMEM_COMM_PART_VERSION << 16) {
		use_comm_partition = true;
	} else if (ver != SMEM_VERSION << 16) {
		pr_err("%s: SBL version not correct 0x%x\n", __func__, ver);
		goto failed;
	}

	is_inited = 1;
	checked = 1;
	spin_unlock_irqrestore(&smem_init_check_lock, flags);
	return is_inited;

failed:
	is_inited = 0;
	checked = 1;
	spin_unlock_irqrestore(&smem_init_check_lock, flags);
	LOG_ERR(
		"%s: shared memory needs to be initialized by SBL before booting\n",
		__func__);
	return is_inited;
}
EXPORT_SYMBOL(smem_initialized_check);

static int restart_notifier_cb(struct notifier_block *this,
			       unsigned long code,
			       void *data)
{
	struct restart_notifier_block *notifier;
	struct notif_data *notifdata = data;
	int ret;

	switch (code) {

	case SUBSYS_AFTER_SHUTDOWN:
		notifier = container_of(this,
					struct restart_notifier_block, nb);
		SMEM_INFO("%s: ssrestart for processor %d ('%s')\n",
			  __func__, notifier->processor,
			  notifier->name);
		remote_spin_release(&remote_spinlock, notifier->processor);
		remote_spin_release_all(notifier->processor);
		break;
	case SUBSYS_SOC_RESET:
		if (!(smem_ramdump_dev && notifdata->enable_mini_ramdumps))
			break;
		/* fall through */
	case SUBSYS_RAMDUMP_NOTIFICATION:
		if (!(smem_ramdump_dev && (notifdata->enable_mini_ramdumps
						|| notifdata->enable_ramdump)))
			break;
		SMEM_DBG("%s: saving ramdump\n", __func__);
		/*
		 * XPU protection does not currently allow the
		 * auxiliary memory regions to be dumped. If this
		 * changes, then num_smem_areas + 1 should be passed
		 * into do_elf_ramdump() to dump all regions.
		 */
		ret = do_elf_ramdump(smem_ramdump_dev,
				     smem_ramdump_segments, 1);
		if (ret < 0)
			LOG_ERR("%s: unable to dump smem %d\n", __func__, ret);
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

static __init int modem_restart_late_init(void)
{
	int i;
	void *handle;
	struct restart_notifier_block *nb;

	if (smem_dev)
		smem_ramdump_dev = create_ramdump_device("smem", smem_dev);
	if (IS_ERR_OR_NULL(smem_ramdump_dev)) {
		LOG_ERR("%s: Unable to create smem ramdump device.\n",
			__func__);
		smem_ramdump_dev = NULL;
	}

	for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
		nb = &restart_notifiers[i];
		handle = subsys_notif_register_notifier(nb->name, &nb->nb);
		SMEM_DBG("%s: registering notif for '%s', handle=%p\n",
			 __func__, nb->name, handle);
	}

	return 0;
}
late_initcall(modem_restart_late_init);

int smem_module_init_notifier_register(struct notifier_block *nb)
{
	int ret;

	if (!nb)
		return -EINVAL;
	mutex_lock(&smem_module_init_notifier_lock);
	ret = raw_notifier_chain_register(&smem_module_init_notifier_list, nb);
	if (smem_module_inited)
		nb->notifier_call(nb, 0, NULL);
	mutex_unlock(&smem_module_init_notifier_lock);
	return ret;
}
EXPORT_SYMBOL(smem_module_init_notifier_register);
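
/*
 * Example (illustrative sketch): a client that must run code once SMEM is
 * up can register for the module-init notification; if SMEM is already
 * inited, the callback fires immediately during registration.
 * example_smem_ready and example_nb are hypothetical.
 *
 *	static int example_smem_ready(struct notifier_block *nb,
 *				      unsigned long state, void *data)
 *	{
 *		// safe to call smem_alloc()/smem_get_entry() from here
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_smem_ready,
 *	};
 *
 *	smem_module_init_notifier_register(&example_nb);
 */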

int smem_module_init_notifier_unregister(struct notifier_block *nb)
{
	int ret;

	if (!nb)
		return -EINVAL;
	mutex_lock(&smem_module_init_notifier_lock);
	ret = raw_notifier_chain_unregister(&smem_module_init_notifier_list,
					    nb);
	mutex_unlock(&smem_module_init_notifier_lock);
	return ret;
}
EXPORT_SYMBOL(smem_module_init_notifier_unregister);

static void smem_module_init_notify(uint32_t state, void *data)
{
	mutex_lock(&smem_module_init_notifier_lock);
	smem_module_inited = 1;
	raw_notifier_call_chain(&smem_module_init_notifier_list,
				state, data);
	mutex_unlock(&smem_module_init_notifier_lock);
}

/**
 * smem_init_security_partition - Init local structures for a secured smem
 *			partition that has apps as one of the hosts
 *
 * @entry: Entry in the security TOC for the partition to init
 * @num: Partition ID
 *
 * Initialize local data structures to point to a secured smem partition
 * that is accessible by apps and another processor. Assumes that one of the
 * listed hosts is apps. Verifies that the partition is valid, otherwise it
 * is skipped. Checks for memory corruption and will BUG() if detected.
 * Assumes smem_areas is already initialized and that smem_areas[0]
 * corresponds to the smem region with the secured partitions.
 */
static void smem_init_security_partition(struct smem_toc_entry *entry,
					 uint32_t num)
{
	uint16_t remote_host = 0;
	struct smem_partition_header *hdr;
	bool is_comm_partition = false;

	if (!entry->offset) {
		SMEM_INFO("Skipping smem partition %d - bad offset\n", num);
		return;
	}
	if (!entry->size) {
		SMEM_INFO("Skipping smem partition %d - bad size\n", num);
		return;
	}
	if (!entry->size_cacheline) {
		SMEM_INFO("Skipping smem partition %d - bad cacheline\n", num);
		return;
	}

	if (entry->host0 == SMEM_COMM_HOST && entry->host1 == SMEM_COMM_HOST)
		is_comm_partition = true;

	if (!is_comm_partition) {
		if (entry->host0 == SMEM_APPS)
			remote_host = entry->host1;
		else
			remote_host = entry->host0;

		if (remote_host >= NUM_SMEM_SUBSYSTEMS) {
			SMEM_INFO(
				"Skipping smem partition %d - bad remote:%d\n",
				num, remote_host);
			return;
		}
		if (partitions[remote_host].offset) {
			SMEM_INFO(
				"Skipping smem partition %d - duplicate of %d\n",
				num, partitions[remote_host].partition_num);
			return;
		}

		if (entry->host0 != SMEM_APPS && entry->host1 != SMEM_APPS) {
			SMEM_INFO(
				"Non-APSS Partition %d offset:%x host0:%d host1:%d\n",
				num, entry->offset, entry->host0,
				entry->host1);
			return;
		}
	}

	hdr = smem_areas[0].virt_addr + entry->offset;

	if (hdr->identifier != SMEM_PART_HDR_IDENTIFIER) {
		LOG_ERR("Smem partition %d hdr magic is bad\n", num);
		BUG();
	}
	if (!hdr->size) {
		LOG_ERR("Smem partition %d size is 0\n", num);
		BUG();
	}
	if (hdr->offset_free_uncached > hdr->size) {
		LOG_ERR("Smem partition %d uncached heap exceeds size\n", num);
		BUG();
	}
	if (hdr->offset_free_cached > hdr->size) {
		LOG_ERR("Smem partition %d cached heap exceeds size\n", num);
		BUG();
	}
	if (is_comm_partition) {
		if (hdr->host0 == SMEM_COMM_HOST
		    && hdr->host1 == SMEM_COMM_HOST) {
			comm_partition.partition_num = num;
			comm_partition.offset = entry->offset;
			comm_partition.size_cacheline = entry->size_cacheline;
			SMEM_INFO("Common Partition %d offset:%x\n", num,
				  entry->offset);
		} else {
			LOG_ERR("Smem Comm partition hosts don't match TOC\n");
			WARN_ON(1);
		}
		return;
	}
	if (hdr->host0 != SMEM_APPS && hdr->host1 != SMEM_APPS) {
		LOG_ERR("Smem partition %d hosts don't match TOC\n", num);
		BUG();
	}
	if (hdr->host0 != remote_host && hdr->host1 != remote_host) {
		LOG_ERR("Smem partition %d hosts don't match TOC\n", num);
		BUG();
	}

	partitions[remote_host].partition_num = num;
	partitions[remote_host].offset = entry->offset;
	partitions[remote_host].size_cacheline = entry->size_cacheline;
	SMEM_INFO("Partition %d offset:%x remote:%d\n", num, entry->offset,
		  remote_host);
}

/**
 * smem_init_security - Init local support for secured smem
 *
 * Looks for a valid security TOC, and if one is found, parses it looking for
 * partitions that apps can access. If any such partitions are found, do the
 * required local initialization to support them. Assumes smem_areas is inited
 * and smem_areas[0] corresponds to the smem region with the TOC.
 */
static void smem_init_security(void)
{
	struct smem_toc *toc;
	uint32_t i;

	SMEM_DBG("%s\n", __func__);

	/* The security TOC lives in the last 4K of the main smem region. */
	toc = smem_areas[0].virt_addr + smem_areas[0].size - 4 * 1024;

	if (toc->identifier != SMEM_TOC_IDENTIFIER) {
		LOG_ERR("%s failed: invalid TOC magic\n", __func__);
		return;
	}

	for (i = 0; i < toc->num_entries; ++i) {
		SMEM_DBG("Partition %d host0:%d host1:%d\n", i,
			 toc->entry[i].host0,
			 toc->entry[i].host1);
		smem_init_security_partition(&toc->entry[i], i);
	}

	SMEM_DBG("%s done\n", __func__);
}

/**
 * smem_init_target_info - Init smem target information
 *
 * @info_addr : smem target info physical address.
 * @size : size of the smem target info structure.
 *
 * This function maps the smem_targ_info structure and checks for a valid
 * identifier; if the identifier is valid, it initializes the smem base
 * address, size, and maximum item count.
 */
static int smem_init_target_info(phys_addr_t info_addr, resource_size_t size)
{
	struct smem_targ_info_type *smem_targ_info;
	void *smem_targ_info_addr;

	smem_targ_info_addr = ioremap_nocache(info_addr, size);
	if (!smem_targ_info_addr) {
		LOG_ERR("%s: failed ioremap_nocache() of addr:%pa size:%pa\n",
			__func__, &info_addr, &size);
		return -ENODEV;
	}
	smem_targ_info =
		(struct smem_targ_info_type __iomem *)smem_targ_info_addr;

	if (smem_targ_info->identifier != SMEM_TARG_INFO_IDENTIFIER) {
		LOG_ERR("%s failed: invalid TARGET INFO magic\n", __func__);
		iounmap(smem_targ_info_addr);
		return -ENODEV;
	}
	smem_ram_phys = smem_targ_info->phys_base_addr;
	smem_ram_size = smem_targ_info->size;
	if (smem_targ_info->max_items)
		smem_max_items = smem_targ_info->max_items;
	iounmap(smem_targ_info_addr);
	return 0;
}

static int msm_smem_probe(struct platform_device *pdev)
{
	char *key;
	struct resource *r;
	phys_addr_t aux_mem_base;
	resource_size_t aux_mem_size;
	int temp_string_size = 11; /* max 3 digit count */
	char temp_string[temp_string_size];
	int ret;
	struct ramdump_segment *ramdump_segments_tmp = NULL;
	struct smem_area *smem_areas_tmp = NULL;
	int smem_idx = 0;
	bool security_enabled;

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					 "smem_targ_info_imem");
	if (r) {
		if (smem_init_target_info(r->start, resource_size(r)))
			goto smem_targ_info_legacy;
		goto smem_targ_info_done;
	}

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					 "smem_targ_info_reg");
	if (r) {
		void *reg_base_addr;
		uint64_t base_addr;

		reg_base_addr = ioremap_nocache(r->start, resource_size(r));
		base_addr = (uint32_t)readl_relaxed(reg_base_addr);
		base_addr |=
			((uint64_t)readl_relaxed(reg_base_addr + 0x4) << 32);
		iounmap(reg_base_addr);
		if ((base_addr == 0) || ((base_addr >> 32) != 0)) {
			SMEM_INFO("%s: Invalid SMEM address\n", __func__);
			goto smem_targ_info_legacy;
		}
		if (smem_init_target_info(base_addr,
					  sizeof(struct smem_targ_info_type)))
			goto smem_targ_info_legacy;
		goto smem_targ_info_done;
	}

smem_targ_info_legacy:
	SMEM_INFO("%s: reading dt-specified SMEM address\n", __func__);
	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smem");
	if (r) {
		smem_ram_size = resource_size(r);
		smem_ram_phys = r->start;
	}

smem_targ_info_done:
	if (!smem_ram_phys || !smem_ram_size) {
		LOG_ERR("%s: Missing SMEM TARGET INFO\n", __func__);
		return -ENODEV;
	}

	smem_ram_base = ioremap_nocache(smem_ram_phys, smem_ram_size);

	if (!smem_ram_base) {
		LOG_ERR("%s: ioremap_nocache() of addr:%pa size: %pa\n",
			__func__,
			&smem_ram_phys, &smem_ram_size);
		return -ENODEV;
	}

	if (!smem_initialized_check())
		return -ENODEV;

	/*
	 * The software implementation requires smem_find(), which needs
	 * smem_ram_base to be initialized. The remote spinlock item is
	 * guaranteed to be allocated by the bootloader, so this is the
	 * safest and earliest place to init the spinlock.
	 */
	ret = init_smem_remote_spinlock();
	if (ret) {
		LOG_ERR("%s: remote spinlock init failed %d\n", __func__, ret);
		return ret;
	}

	key = "irq-reg-base";
	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
	if (!r) {
		LOG_ERR("%s: missing '%s'\n", __func__, key);
		return -ENODEV;
	}

	num_smem_areas = 1;
	while (1) {
		scnprintf(temp_string, temp_string_size, "aux-mem%d",
			  num_smem_areas);
		r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						 temp_string);
		if (!r)
			break;

		++num_smem_areas;
		if (num_smem_areas > 999) {
			LOG_ERR("%s: max num aux mem regions reached\n",
				__func__);
			break;
		}
	}
	/* Initialize main SMEM region and SSR ramdump region */
	smem_areas_tmp = kmalloc_array(num_smem_areas,
				       sizeof(struct smem_area), GFP_KERNEL);
	if (!smem_areas_tmp) {
		LOG_ERR("%s: smem areas kmalloc failed\n", __func__);
		ret = -ENOMEM;
		goto free_smem_areas;
	}

	ramdump_segments_tmp = kcalloc(num_smem_areas,
				       sizeof(struct ramdump_segment),
				       GFP_KERNEL);
	if (!ramdump_segments_tmp) {
		LOG_ERR("%s: ramdump segment kmalloc failed\n", __func__);
		ret = -ENOMEM;
		goto free_smem_areas;
	}
	smem_areas_tmp[smem_idx].phys_addr = smem_ram_phys;
	smem_areas_tmp[smem_idx].size = smem_ram_size;
	smem_areas_tmp[smem_idx].virt_addr = smem_ram_base;

	ramdump_segments_tmp[smem_idx].address = smem_ram_phys;
	ramdump_segments_tmp[smem_idx].size = smem_ram_size;
	++smem_idx;

	/* Configure auxiliary SMEM regions */
	while (1) {
		scnprintf(temp_string, temp_string_size, "aux-mem%d",
			  smem_idx);
		r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						 temp_string);
		if (!r)
			break;
		aux_mem_base = r->start;
		aux_mem_size = resource_size(r);

		ramdump_segments_tmp[smem_idx].address = aux_mem_base;
		ramdump_segments_tmp[smem_idx].size = aux_mem_size;

		smem_areas_tmp[smem_idx].phys_addr = aux_mem_base;
		smem_areas_tmp[smem_idx].size = aux_mem_size;
		smem_areas_tmp[smem_idx].virt_addr = ioremap_nocache(
			(unsigned long)(smem_areas_tmp[smem_idx].phys_addr),
			smem_areas_tmp[smem_idx].size);
		SMEM_DBG("%s: %s = %pa %pa -> %p", __func__, temp_string,
			 &aux_mem_base, &aux_mem_size,
			 smem_areas_tmp[smem_idx].virt_addr);

		if (!smem_areas_tmp[smem_idx].virt_addr) {
			LOG_ERR("%s: ioremap_nocache() of addr:%pa size: %pa\n",
				__func__,
				&smem_areas_tmp[smem_idx].phys_addr,
				&smem_areas_tmp[smem_idx].size);
			ret = -ENOMEM;
			goto free_smem_areas;
		}

		if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
				(uintptr_t)smem_areas_tmp[smem_idx].virt_addr,
				smem_areas_tmp[smem_idx].size)) {
			LOG_ERR(
				"%s: invalid virtual address block %i: %p:%pa\n",
				__func__, smem_idx,
				smem_areas_tmp[smem_idx].virt_addr,
				&smem_areas_tmp[smem_idx].size);
			++smem_idx;
			ret = -EINVAL;
			goto free_smem_areas;
		}

		++smem_idx;
		if (smem_idx > 999) {
			LOG_ERR("%s: max num aux mem regions reached\n",
				__func__);
			break;
		}
	}

	smem_areas = smem_areas_tmp;
	smem_ramdump_segments = ramdump_segments_tmp;

	key = "qcom,mpu-enabled";
	security_enabled = of_property_read_bool(pdev->dev.of_node, key);
	if (security_enabled) {
		SMEM_INFO("smem security enabled\n");
		smem_init_security();
	}
	smem_dev = &pdev->dev;
	probe_done = true;

	ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
	if (ret)
		LOG_ERR("%s: of_platform_populate failed %d\n", __func__, ret);

	return 0;

free_smem_areas:
	for (smem_idx = smem_idx - 1; smem_idx >= 1; --smem_idx)
		iounmap(smem_areas_tmp[smem_idx].virt_addr);

	num_smem_areas = 0;
	kfree(ramdump_segments_tmp);
	kfree(smem_areas_tmp);
	return ret;
}

static const struct of_device_id msm_smem_match_table[] = {
	{ .compatible = "qcom,smem" },
	{},
};
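
/*
 * Illustrative devicetree sketch (assumed addresses and sizes; consult the
 * platform's actual dts and binding documentation). The probe function
 * above looks up the named resources and property used here:
 *
 *	qcom,smem@86000000 {
 *		compatible = "qcom,smem";
 *		reg = <0x86000000 0x200000>,
 *		      <0x1fd4000 0x8>;
 *		reg-names = "smem", "irq-reg-base";
 *		qcom,mpu-enabled;
 *	};
 */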

static struct platform_driver msm_smem_driver = {
	.probe = msm_smem_probe,
	.driver = {
		.name = "msm_smem",
		.owner = THIS_MODULE,
		.of_match_table = msm_smem_match_table,
	},
};

int __init msm_smem_init(void)
{
	static bool registered;
	int rc;

	if (registered)
		return 0;

	registered = true;
	smem_max_items = SMEM_NUM_ITEMS;
	smem_ipc_log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "smem", 0);
	if (!smem_ipc_log_ctx) {
		pr_err("%s: unable to create logging context\n", __func__);
		msm_smem_debug_mask = 0;
	}

	rc = platform_driver_register(&msm_smem_driver);
	if (rc) {
		LOG_ERR("%s: msm_smem_driver register failed %d\n",
			__func__, rc);
		return rc;
	}

	smem_module_init_notify(0, NULL);

	return 0;
}

arch_initcall(msm_smem_init);