/*
 * Device tree based initialization code for reserved memory.
 *
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 * Author: Marek Szyprowski <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */
| 13 | |
Marek Szyprowski | 9d8eab7 | 2013-08-26 14:43:10 +0200 | [diff] [blame] | 14 | #include <linux/memblock.h> |
| 15 | #include <linux/err.h> |
| 16 | #include <linux/of.h> |
| 17 | #include <linux/of_fdt.h> |
| 18 | #include <linux/of_platform.h> |
| 19 | #include <linux/mm.h> |
| 20 | #include <linux/sizes.h> |
| 21 | #include <linux/mm_types.h> |
| 22 | #include <linux/dma-contiguous.h> |
| 23 | #include <linux/dma-mapping.h> |
| 24 | #include <linux/of_reserved_mem.h> |
| 25 | |
| 26 | #define MAX_RESERVED_REGIONS 16 |
| 27 | struct reserved_mem { |
| 28 | phys_addr_t base; |
| 29 | unsigned long size; |
| 30 | struct cma *cma; |
| 31 | char name[32]; |
| 32 | }; |
| 33 | static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS]; |
| 34 | static int reserved_mem_count; |
| 35 | |
| 36 | static int __init fdt_scan_reserved_mem(unsigned long node, const char *uname, |
| 37 | int depth, void *data) |
| 38 | { |
| 39 | struct reserved_mem *rmem = &reserved_mem[reserved_mem_count]; |
| 40 | phys_addr_t base, size; |
| 41 | int is_cma, is_reserved; |
| 42 | unsigned long len; |
| 43 | const char *status; |
| 44 | __be32 *prop; |
| 45 | |
| 46 | is_cma = IS_ENABLED(CONFIG_DMA_CMA) && |
| 47 | of_flat_dt_is_compatible(node, "linux,contiguous-memory-region"); |
| 48 | is_reserved = of_flat_dt_is_compatible(node, "reserved-memory-region"); |
| 49 | |
| 50 | if (!is_reserved && !is_cma) { |
| 51 | /* ignore node and scan next one */ |
| 52 | return 0; |
| 53 | } |
| 54 | |
| 55 | status = of_get_flat_dt_prop(node, "status", &len); |
| 56 | if (status && strcmp(status, "okay") != 0) { |
| 57 | /* ignore disabled node nad scan next one */ |
| 58 | return 0; |
| 59 | } |
| 60 | |
| 61 | prop = of_get_flat_dt_prop(node, "reg", &len); |
| 62 | if (!prop || (len < (dt_root_size_cells + dt_root_addr_cells) * |
| 63 | sizeof(__be32))) { |
| 64 | pr_err("Reserved mem: node %s, incorrect \"reg\" property\n", |
| 65 | uname); |
| 66 | /* ignore node and scan next one */ |
| 67 | return 0; |
| 68 | } |
| 69 | base = dt_mem_next_cell(dt_root_addr_cells, &prop); |
| 70 | size = dt_mem_next_cell(dt_root_size_cells, &prop); |
| 71 | |
| 72 | if (!size) { |
| 73 | /* ignore node and scan next one */ |
| 74 | return 0; |
| 75 | } |
| 76 | |
| 77 | pr_info("Reserved mem: found %s, memory base %lx, size %ld MiB\n", |
| 78 | uname, (unsigned long)base, (unsigned long)size / SZ_1M); |
| 79 | |
| 80 | if (reserved_mem_count == ARRAY_SIZE(reserved_mem)) |
| 81 | return -ENOSPC; |
| 82 | |
| 83 | rmem->base = base; |
| 84 | rmem->size = size; |
| 85 | strlcpy(rmem->name, uname, sizeof(rmem->name)); |
| 86 | |
| 87 | if (is_cma) { |
| 88 | struct cma *cma; |
| 89 | if (dma_contiguous_reserve_area(size, base, 0, &cma) == 0) { |
| 90 | rmem->cma = cma; |
| 91 | reserved_mem_count++; |
| 92 | if (of_get_flat_dt_prop(node, |
| 93 | "linux,default-contiguous-region", |
| 94 | NULL)) |
| 95 | dma_contiguous_set_default(cma); |
| 96 | } |
| 97 | } else if (is_reserved) { |
| 98 | if (memblock_remove(base, size) == 0) |
| 99 | reserved_mem_count++; |
| 100 | else |
| 101 | pr_err("Failed to reserve memory for %s\n", uname); |
| 102 | } |
| 103 | |
| 104 | return 0; |
| 105 | } |
| 106 | |
| 107 | static struct reserved_mem *get_dma_memory_region(struct device *dev) |
| 108 | { |
| 109 | struct device_node *node; |
| 110 | const char *name; |
| 111 | int i; |
| 112 | |
| 113 | node = of_parse_phandle(dev->of_node, "memory-region", 0); |
| 114 | if (!node) |
| 115 | return NULL; |
| 116 | |
| 117 | name = kbasename(node->full_name); |
| 118 | for (i = 0; i < reserved_mem_count; i++) |
| 119 | if (strcmp(name, reserved_mem[i].name) == 0) |
| 120 | return &reserved_mem[i]; |
| 121 | return NULL; |
| 122 | } |
| 123 | |
/**
 * of_reserved_mem_device_init() - assign reserved memory region to given device
 *
 * This function assigns the memory region pointed to by the "memory-region"
 * device tree property to the given device.
 */
| 130 | void of_reserved_mem_device_init(struct device *dev) |
| 131 | { |
| 132 | struct reserved_mem *region = get_dma_memory_region(dev); |
| 133 | if (!region) |
| 134 | return; |
| 135 | |
| 136 | if (region->cma) { |
| 137 | dev_set_cma_area(dev, region->cma); |
| 138 | pr_info("Assigned CMA %s to %s device\n", region->name, |
| 139 | dev_name(dev)); |
| 140 | } else { |
| 141 | if (dma_declare_coherent_memory(dev, region->base, region->base, |
| 142 | region->size, DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE) != 0) |
| 143 | pr_info("Declared reserved memory %s to %s device\n", |
| 144 | region->name, dev_name(dev)); |
| 145 | } |
| 146 | } |
| 147 | |
| 148 | /** |
| 149 | * of_reserved_mem_device_release() - release reserved memory device structures |
| 150 | * |
| 151 | * This function releases structures allocated for memory region handling for |
| 152 | * the given device. |
| 153 | */ |
| 154 | void of_reserved_mem_device_release(struct device *dev) |
| 155 | { |
| 156 | struct reserved_mem *region = get_dma_memory_region(dev); |
| 157 | if (!region && !region->cma) |
| 158 | dma_release_declared_memory(dev); |
| 159 | } |
| 160 | |
| 161 | /** |
| 162 | * early_init_dt_scan_reserved_mem() - create reserved memory regions |
| 163 | * |
| 164 | * This function grabs memory from early allocator for device exclusive use |
| 165 | * defined in device tree structures. It should be called by arch specific code |
| 166 | * once the early allocator (memblock) has been activated and all other |
| 167 | * subsystems have already allocated/reserved memory. |
| 168 | */ |
| 169 | void __init early_init_dt_scan_reserved_mem(void) |
| 170 | { |
| 171 | of_scan_flat_dt_by_path("/memory/reserved-memory", |
| 172 | fdt_scan_reserved_mem, NULL); |
| 173 | } |