/*
 * Device tree based initialization code for reserved memory.
 *
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 * Author: Marek Szyprowski <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 */

#include <asm/dma-contiguous.h>

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/mm_types.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-mapping.h>
#include <linux/of_reserved_mem.h>

#define MAX_RESERVED_REGIONS	16
struct reserved_mem {
	phys_addr_t	base;		/* physical base address of the region */
	phys_addr_t	size;		/* region size in bytes */
	struct cma	*cma;		/* CMA context, NULL for plain reservations */
	char		name[32];	/* node name used to match consumers */
};
static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
static int reserved_mem_count;

static int __init fdt_scan_reserved_mem(unsigned long node, const char *uname,
					int depth, void *data)
{
	struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];
	phys_addr_t base, size;
	int is_cma, is_reserved;
	unsigned long len;
	const char *status;
	__be32 *prop;

	is_cma = IS_ENABLED(CONFIG_DMA_CMA) &&
		 of_flat_dt_is_compatible(node, "linux,contiguous-memory-region");
	is_reserved = of_flat_dt_is_compatible(node, "reserved-memory-region");

	if (!is_reserved && !is_cma) {
		/* ignore node and scan next one */
		return 0;
	}

	status = of_get_flat_dt_prop(node, "status", &len);
	if (status && strcmp(status, "okay") != 0) {
		/* ignore disabled node and scan next one */
		return 0;
	}

	prop = of_get_flat_dt_prop(node, "reg", &len);
	if (!prop || (len < (dt_root_size_cells + dt_root_addr_cells) *
			    sizeof(__be32))) {
		pr_err("Reserved mem: node %s, incorrect \"reg\" property\n",
		       uname);
		/* ignore node and scan next one */
		return 0;
	}
	base = dt_mem_next_cell(dt_root_addr_cells, &prop);
	size = dt_mem_next_cell(dt_root_size_cells, &prop);

	if (!size) {
		/* ignore node and scan next one */
		return 0;
	}

	pr_info("Reserved mem: found %s, memory base %lx, size %ld MiB\n",
		uname, (unsigned long)base, (unsigned long)size / SZ_1M);

	if (reserved_mem_count == ARRAY_SIZE(reserved_mem))
		return -ENOSPC;

	rmem->base = base;
	rmem->size = size;
	strlcpy(rmem->name, uname, sizeof(rmem->name));

	if (is_cma) {
		struct cma *cma;
		if (dma_contiguous_reserve_area(size, base, 0, &cma) == 0) {
			rmem->cma = cma;
			reserved_mem_count++;
			if (of_get_flat_dt_prop(node,
						"linux,default-contiguous-region",
						NULL))
				dma_contiguous_set_default(cma);
		}
	} else if (is_reserved) {
		if (memblock_remove(base, size) == 0)
			reserved_mem_count++;
		else
			pr_err("Failed to reserve memory for %s\n", uname);
	}

	return 0;
}
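
/*
 * For reference, a minimal sketch of the device tree layout this scanner
 * matches (illustrative only; node names and addresses are made up):
 *
 *	memory {
 *		reg = <0x40000000 0x40000000>;
 *
 *		reserved-memory {
 *			contig_mem: region@72000000 {
 *				compatible = "linux,contiguous-memory-region";
 *				reg = <0x72000000 0x4000000>;
 *				linux,default-contiguous-region;
 *			};
 *
 *			display_mem: region@78000000 {
 *				compatible = "reserved-memory-region";
 *				reg = <0x78000000 0x800000>;
 *			};
 *		};
 *	};
 */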

static struct reserved_mem *get_dma_memory_region(struct device *dev)
{
	struct device_node *node;
	const char *name;
	int i;

	node = of_parse_phandle(dev->of_node, "memory-region", 0);
	if (!node)
		return NULL;

	name = kbasename(node->full_name);
	for (i = 0; i < reserved_mem_count; i++)
		if (strcmp(name, reserved_mem[i].name) == 0)
			break;

	/* drop the reference taken by of_parse_phandle() */
	of_node_put(node);

	return i < reserved_mem_count ? &reserved_mem[i] : NULL;
}
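
/*
 * Matching note: get_dma_memory_region() resolves the "memory-region"
 * phandle to a device node and compares kbasename() of its full_name
 * (e.g. "region@78000000" in the sketch above) against the uname recorded
 * by fdt_scan_reserved_mem().
 */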

/**
 * of_reserved_mem_device_init() - assign reserved memory region to given device
 * @dev:	device to assign the region to
 *
 * This function assigns the memory region pointed to by the "memory-region"
 * device tree property to the given device.
 */
void of_reserved_mem_device_init(struct device *dev)
{
	struct reserved_mem *region = get_dma_memory_region(dev);
	if (!region)
		return;

	if (region->cma) {
		dev_set_cma_area(dev, region->cma);
		pr_info("Assigned CMA %s to %s device\n", region->name,
			dev_name(dev));
	} else {
		/*
		 * dma_declare_coherent_memory() returns a non-zero bitmask
		 * (e.g. DMA_MEMORY_MAP) on success and 0 on failure, so a
		 * non-zero result means the declaration succeeded.
		 */
		if (dma_declare_coherent_memory(dev, region->base, region->base,
		    region->size, DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE) != 0)
			pr_info("Declared reserved memory %s to %s device\n",
				region->name, dev_name(dev));
	}
}
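
/*
 * Consumer sketch (hypothetical node and compatible, for illustration): a
 * device claims one of the regions declared above through a "memory-region"
 * phandle, which of_reserved_mem_device_init() then resolves by node name:
 *
 *	video-codec@11000000 {
 *		compatible = "vendor,video-codec";
 *		memory-region = <&display_mem>;
 *	};
 */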

/**
 * of_reserved_mem_device_release() - release reserved memory device structures
 * @dev:	device the region was assigned to
 *
 * This function releases the structures allocated for memory region handling
 * for the given device.
 */
void of_reserved_mem_device_release(struct device *dev)
{
	struct reserved_mem *region = get_dma_memory_region(dev);

	/* only non-CMA regions have coherent memory declared for them */
	if (region && !region->cma)
		dma_release_declared_memory(dev);
}
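
/*
 * Usage sketch (hypothetical driver, assuming the usual platform driver
 * shape): pair the init/release calls in the probe and remove paths:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		of_reserved_mem_device_init(&pdev->dev);
 *		...
 *		return 0;
 *	}
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		of_reserved_mem_device_release(&pdev->dev);
 *		return 0;
 *	}
 */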

/**
 * early_init_dt_scan_reserved_mem() - create reserved memory regions
 *
 * This function grabs memory from the early allocator for device-exclusive
 * use, as defined in the device tree. It should be called by arch-specific
 * code once the early allocator (memblock) has been activated and all other
 * subsystems have already allocated/reserved memory.
 */
void __init early_init_dt_scan_reserved_mem(void)
{
	of_scan_flat_dt_by_path("/memory/reserved-memory",
				fdt_scan_reserved_mem, NULL);
}
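
/*
 * Call-site sketch (an assumption about arch wiring, not defined in this
 * file): arch setup code is expected to invoke this once memblock is up and
 * all earlier reservations are done, e.g. from ARM's arm_memblock_init():
 *
 *	void __init arm_memblock_init(...)
 *	{
 *		...
 *		early_init_dt_scan_reserved_mem();
 *		...
 *	}
 */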