/*
 * Device tree based initialization code for reserved memory.
 *
 * Copyright (c) 2013, 2015 The Linux Foundation. All Rights Reserved.
 * Copyright (c) 2013,2014 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 * Author: Marek Szyprowski <m.szyprowski@samsung.com>
 * Author: Josh Cartwright <joshc@codeaurora.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

#define pr_fmt(fmt)	"OF: reserved mem: " fmt

#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/of_reserved_mem.h>
#include <linux/sort.h>

#define MAX_RESERVED_REGIONS	16
static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
static int reserved_mem_count;

#if defined(CONFIG_HAVE_MEMBLOCK)
#include <linux/memblock.h>
int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	phys_addr_t base;
	/*
	 * We use __memblock_alloc_base() because memblock_alloc_base()
	 * panic()s on allocation failure.
	 */
	end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
	base = __memblock_alloc_base(size, align, end);
	if (!base)
		return -ENOMEM;

	/*
	 * Check if the allocated region fits into the start..end window.
	 */
	if (base < start) {
		memblock_free(base, size);
		return -ENOMEM;
	}

	*res_base = base;
	if (nomap)
		return memblock_remove(base, size);
	return 0;
}
#else
int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	pr_err("Reserved memory not supported, ignoring region 0x%llx%s\n",
		size, nomap ? " (nomap)" : "");
	return -ENOSYS;
}
#endif

/**
 * fdt_reserved_mem_save_node() - save fdt node for second pass initialization
 */
void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
				       phys_addr_t base, phys_addr_t size)
{
	struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];

	if (reserved_mem_count == ARRAY_SIZE(reserved_mem)) {
		pr_err("not enough space for all defined regions.\n");
		return;
	}

	rmem->fdt_node = node;
	rmem->name = uname;
	rmem->base = base;
	rmem->size = size;

	reserved_mem_count++;
	return;
}

/**
 * __reserved_mem_alloc_size() - allocate reserved memory described by
 *	'size', 'align' and 'alloc-ranges' properties
 */
static int __init __reserved_mem_alloc_size(unsigned long node,
	const char *uname, phys_addr_t *res_base, phys_addr_t *res_size)
{
	int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
	phys_addr_t start = 0, end = 0;
	phys_addr_t base = 0, align = 0, size;
	int len;
	const __be32 *prop;
	int nomap;
	int ret;

	prop = of_get_flat_dt_prop(node, "size", &len);
	if (!prop)
		return -EINVAL;

	if (len != dt_root_size_cells * sizeof(__be32)) {
		pr_err("invalid size property in '%s' node.\n", uname);
		return -EINVAL;
	}
	size = dt_mem_next_cell(dt_root_size_cells, &prop);

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	prop = of_get_flat_dt_prop(node, "alignment", &len);
	if (prop) {
		if (len != dt_root_addr_cells * sizeof(__be32)) {
			pr_err("invalid alignment property in '%s' node.\n",
				uname);
			return -EINVAL;
		}
		align = dt_mem_next_cell(dt_root_addr_cells, &prop);
	}

	/* Need to adjust the alignment to satisfy the CMA requirement */
	if (IS_ENABLED(CONFIG_CMA)
	    && of_flat_dt_is_compatible(node, "shared-dma-pool")
	    && of_get_flat_dt_prop(node, "reusable", NULL)
	    && !of_get_flat_dt_prop(node, "no-map", NULL)) {
		unsigned long order =
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

		align = max(align, (phys_addr_t)PAGE_SIZE << order);
	}

	prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
	if (prop) {

		if (len % t_len != 0) {
			pr_err("invalid alloc-ranges property in '%s', skipping node.\n",
			       uname);
			return -EINVAL;
		}

		base = 0;

		while (len > 0) {
			start = dt_mem_next_cell(dt_root_addr_cells, &prop);
			end = start + dt_mem_next_cell(dt_root_size_cells,
						       &prop);

			ret = early_init_dt_alloc_reserved_memory_arch(size,
					align, start, end, nomap, &base);
			if (ret == 0) {
				pr_debug("allocated memory for '%s' node: base %pa, size %ld MiB\n",
					uname, &base,
					(unsigned long)size / SZ_1M);
				break;
			}
			len -= t_len;
		}

	} else {
		ret = early_init_dt_alloc_reserved_memory_arch(size, align,
							0, 0, nomap, &base);
		if (ret == 0)
			pr_debug("allocated memory for '%s' node: base %pa, size %ld MiB\n",
				uname, &base, (unsigned long)size / SZ_1M);
	}

	if (base == 0) {
		pr_info("failed to allocate memory for node '%s'\n", uname);
		return -ENOMEM;
	}

	*res_base = base;
	*res_size = size;

	return 0;
}

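/*
 * A minimal sketch of a node handled by __reserved_mem_alloc_size()
 * (labels, names and addresses are made up; a 32-bit layout with
 * #address-cells = <1> and #size-cells = <1> is assumed): a 16 MiB
 * region, aligned to 1 MiB, placed somewhere inside the given window:
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		display_pool: display-pool {
 *			size = <0x1000000>;
 *			alignment = <0x100000>;
 *			alloc-ranges = <0x40000000 0x10000000>;
 *		};
 *	};
 */
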
static const struct of_device_id __rmem_of_table_sentinel
	__used __section(__reservedmem_of_table_end);

/**
 * __reserved_mem_init_node() - call region specific reserved memory init code
 */
static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
{
	extern const struct of_device_id __reservedmem_of_table[];
	const struct of_device_id *i;

	for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) {
		reservedmem_of_init_fn initfn = i->data;
		const char *compat = i->compatible;

		if (!of_flat_dt_is_compatible(rmem->fdt_node, compat))
			continue;

		if (initfn(rmem) == 0) {
			pr_info("initialized node %s, compatible id %s\n",
				rmem->name, compat);
			return 0;
		}
	}
	return -ENOENT;
}

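/*
 * Region-specific setup code hooks into the table scanned above via
 * RESERVEDMEM_OF_DECLARE(). A minimal sketch (the function, ops and
 * compatible string are hypothetical):
 *
 *	static int __init foo_mem_setup(struct reserved_mem *rmem)
 *	{
 *		rmem->ops = &foo_mem_ops;
 *		pr_info("foo: %pa bytes reserved at %pa\n",
 *			&rmem->size, &rmem->base);
 *		return 0;
 *	}
 *	RESERVEDMEM_OF_DECLARE(foo, "vendor,foo-mem", foo_mem_setup);
 */
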
static int __init __rmem_cmp(const void *a, const void *b)
{
	const struct reserved_mem *ra = a, *rb = b;

	if (ra->base < rb->base)
		return -1;

	if (ra->base > rb->base)
		return 1;

	return 0;
}

static void __init __rmem_check_for_overlap(void)
{
	int i;

	if (reserved_mem_count < 2)
		return;

	sort(reserved_mem, reserved_mem_count, sizeof(reserved_mem[0]),
	     __rmem_cmp, NULL);
	for (i = 0; i < reserved_mem_count - 1; i++) {
		struct reserved_mem *this, *next;

		this = &reserved_mem[i];
		next = &reserved_mem[i + 1];
		if (!(this->base && next->base))
			continue;
		if (this->base + this->size > next->base) {
			phys_addr_t this_end, next_end;

			this_end = this->base + this->size;
			next_end = next->base + next->size;
			pr_err("OVERLAP DETECTED!\n%s (%pa--%pa) overlaps with %s (%pa--%pa)\n",
			       this->name, &this->base, &this_end,
			       next->name, &next->base, &next_end);
		}
	}
}

/**
 * fdt_init_reserved_mem() - allocate and init all saved reserved memory regions
 */
void __init fdt_init_reserved_mem(void)
{
	int i;

	/* check for overlapping reserved regions */
	__rmem_check_for_overlap();

	for (i = 0; i < reserved_mem_count; i++) {
		struct reserved_mem *rmem = &reserved_mem[i];
		unsigned long node = rmem->fdt_node;
		int len;
		const __be32 *prop;
		int err = 0;

		prop = of_get_flat_dt_prop(node, "phandle", &len);
		if (!prop)
			prop = of_get_flat_dt_prop(node, "linux,phandle", &len);
		if (prop)
			rmem->phandle = of_read_number(prop, len/4);

		if (rmem->size == 0)
			err = __reserved_mem_alloc_size(node, rmem->name,
						&rmem->base, &rmem->size);
		if (err == 0)
			__reserved_mem_init_node(rmem);
	}
}

static inline struct reserved_mem *__find_rmem(struct device_node *node)
{
	unsigned int i;

	if (!node->phandle)
		return NULL;

	for (i = 0; i < reserved_mem_count; i++)
		if (reserved_mem[i].phandle == node->phandle)
			return &reserved_mem[i];
	return NULL;
}

/**
 * of_reserved_mem_device_init() - assign reserved memory region to given device
 *
 * This function assigns the memory region pointed to by the "memory-region"
 * device tree property to the given device.
 */
int of_reserved_mem_device_init(struct device *dev)
{
	struct reserved_mem *rmem;
	struct device_node *np;
	int ret;

	np = of_parse_phandle(dev->of_node, "memory-region", 0);
	if (!np)
		return -ENODEV;

	rmem = __find_rmem(np);
	of_node_put(np);

	if (!rmem || !rmem->ops || !rmem->ops->device_init)
		return -EINVAL;

	ret = rmem->ops->device_init(rmem, dev);
	if (ret == 0)
		dev_info(dev, "assigned reserved memory node %s\n", rmem->name);

	return ret;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init);

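/*
 * A minimal sketch of a consumer (device and label names are
 * hypothetical): a device node carrying
 *
 *	memory-region = <&display_pool>;
 *
 * would typically have its driver call, from probe():
 *
 *	ret = of_reserved_mem_device_init(&pdev->dev);
 *	if (ret && ret != -ENODEV)
 *		return ret;
 */
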
/**
 * of_reserved_mem_device_release() - release reserved memory device structures
 *
 * This function releases structures allocated for memory region handling for
 * the given device.
 */
void of_reserved_mem_device_release(struct device *dev)
{
	struct reserved_mem *rmem;
	struct device_node *np;

	np = of_parse_phandle(dev->of_node, "memory-region", 0);
	if (!np)
		return;

	rmem = __find_rmem(np);
	of_node_put(np);

	if (!rmem || !rmem->ops || !rmem->ops->device_release)
		return;

	rmem->ops->device_release(rmem, dev);
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_release);
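
/*
 * A driver that called of_reserved_mem_device_init() from its probe()
 * path would typically call of_reserved_mem_device_release() from its
 * remove() path to undo the assignment.
 */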