/*
 *
 * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <linux/spinlock.h>
#include <asm/dma-contiguous.h>
#include <asm/tlbflush.h>

struct removed_region {
	phys_addr_t base;
	int nr_pages;
	unsigned long *bitmap;
	int fixup;
	struct mutex lock;
};

#define NO_KERNEL_MAPPING_DUMMY	0x2222

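/*
 * Allocate the bookkeeping for a removed region: one descriptor plus a
 * bitmap with one bit per page of the reserved range.
 */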
static int dma_init_removed_memory(phys_addr_t phys_addr, size_t size,
				   struct removed_region **mem)
{
	struct removed_region *dma_mem = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

	dma_mem = kzalloc(sizeof(struct removed_region), GFP_KERNEL);
	if (!dma_mem)
		goto out;
	dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dma_mem->bitmap)
		goto free1_out;

	dma_mem->base = phys_addr;
	dma_mem->nr_pages = pages;
	mutex_init(&dma_mem->lock);

	*mem = dma_mem;

	return 0;

free1_out:
	kfree(dma_mem);
out:
	return -ENOMEM;
}

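/* Attach the region to a device; only one removed region per device. */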
static int dma_assign_removed_region(struct device *dev,
				     struct removed_region *mem)
{
	if (dev->removed_mem)
		return -EBUSY;

	dev->removed_mem = mem;
	return 0;
}

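/*
 * Carve [base_pfn, end_pfn) out of the "System RAM" entry in the iomem
 * resource tree. The conflicting resource may match the range exactly,
 * overlap it at either end, or fully contain it, in which case the
 * conflicting resource is split in two.
 */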
static void adapt_iomem_resource(unsigned long base_pfn, unsigned long end_pfn)
{
	struct resource *res, *conflict;
	resource_size_t cstart, cend;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return;

	res->name = "System RAM";
	res->start = __pfn_to_phys(base_pfn);
	res->end = __pfn_to_phys(end_pfn) - 1;
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	conflict = request_resource_conflict(&iomem_resource, res);
	if (!conflict) {
		/*
		 * No conflict means the range was not part of "System RAM"
		 * to begin with and res was inserted into the tree; undo the
		 * insertion before freeing it so the resource tree is not
		 * left pointing at freed memory.
		 */
		pr_err("Removed memory: no conflict resource found\n");
		release_resource(res);
		kfree(res);
		goto done;
	}

	cstart = conflict->start;
	cend = conflict->end;
	if ((cstart == res->start) && (cend == res->end)) {
		release_resource(conflict);
	} else if ((res->start >= cstart) && (res->start <= cend)) {
		if (res->start == cstart) {
			adjust_resource(conflict, res->end + 1,
					cend - res->end);
		} else if (res->end == cend) {
			adjust_resource(conflict, cstart,
					res->start - cstart);
		} else {
			adjust_resource(conflict, cstart,
					res->start - cstart);
			res->start = res->end + 1;
			res->end = cend;
			request_resource(&iomem_resource, res);
			goto done;
		}
	} else {
		pr_err("Removed memory: incorrect resource conflict start=%llx end=%llx\n",
			(unsigned long long) conflict->start,
			(unsigned long long) conflict->end);
	}

	kfree(res);
done:
	return;
}

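/*
 * With FLATMEM the memmap is one flat array, so the slice of struct pages
 * backing a fully removed, pageblock-aligned range can be freed outright.
 */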
#ifdef CONFIG_FLATMEM
static void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	start_pfn = ALIGN(start_pfn, pageblock_nr_pages);
	end_pfn = round_down(end_pfn, pageblock_nr_pages);
	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and round start upwards and end
	 * downwards.
	 */
	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these, free the section of the
	 * memmap array.
	 */
	if (pg < pgend)
		free_bootmem_late(pg, pgend - pg);
}
#else
static void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
}
#endif

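/*
 * Tear down the kernel's linear mapping of a range: clear each PTE, then
 * make the updates visible and flush the TLB for the whole range.
 */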
static int _clear_pte(pte_t *pte, pgtable_t token, unsigned long addr,
			void *data)
{
	pte_clear(&init_mm, addr, pte);
	return 0;
}

static void clear_mapping(unsigned long addr, unsigned long size)
{
	apply_to_page_range(&init_mm, addr, size, _clear_pte, NULL);
	/* ensure ptes are updated */
	mb();
	flush_tlb_kernel_range(addr, addr + size);
}

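/*
 * One-shot fixup for "no-map-fixup" regions, run on the first allocation:
 * the first @index pages are carved out of the system (removed from
 * memblock, unmapped from the linear map, memmap freed), while the rest of
 * the reservation is handed back to the page allocator. The pool then
 * shrinks to the carved-out @index pages.
 */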
static void removed_region_fixup(struct removed_region *dma_mem, int index)
{
	unsigned long fixup_size;
	unsigned long base_pfn;
	unsigned long flags;

	if (index > dma_mem->nr_pages)
		return;

	/* carve-out */
	flags = memblock_region_resize_late_begin();
	memblock_free(dma_mem->base, dma_mem->nr_pages * PAGE_SIZE);
	memblock_remove(dma_mem->base, index * PAGE_SIZE);
	memblock_region_resize_late_end(flags);

	/* clear page-mappings */
	base_pfn = dma_mem->base >> PAGE_SHIFT;
	if (!PageHighMem(pfn_to_page(base_pfn))) {
		clear_mapping((unsigned long) phys_to_virt(dma_mem->base),
				index * PAGE_SIZE);
	}

	/* free page objects */
	free_memmap(base_pfn, base_pfn + index);

	/* return remaining area to system */
	fixup_size = (dma_mem->nr_pages - index) * PAGE_SIZE;
	free_bootmem_late(dma_mem->base + index * PAGE_SIZE, fixup_size);

	/* drop the carved-out range from the "System RAM" iomem resource */
	adapt_iomem_resource(base_pfn, base_pfn + index);

	/* limit the fixup region */
	dma_mem->nr_pages = index;
}

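/*
 * Allocate from the removed pool: find a free run in the page bitmap, hand
 * back its physical address as the DMA handle, and (unless
 * DMA_ATTR_NO_KERNEL_MAPPING is set) map it with ioremap_wc() for the CPU.
 * DMA_ATTR_SKIP_ZEROING skips the memset of the freshly allocated memory.
 */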
void *removed_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		    gfp_t gfp, unsigned long attrs)
{
	bool no_kernel_mapping = attrs & DMA_ATTR_NO_KERNEL_MAPPING;
	bool skip_zeroing = attrs & DMA_ATTR_SKIP_ZEROING;
	unsigned int pageno;
	unsigned long order;
	void __iomem *addr = NULL;
	struct removed_region *dma_mem = dev->removed_mem;
	unsigned int nbits;
	unsigned int align;

	if (!gfpflags_allow_blocking(gfp))
		return NULL;

	size = PAGE_ALIGN(size);
	nbits = size >> PAGE_SHIFT;
	order = get_order(size);

	/*
	 * Cap the alignment at 1 MiB; larger allocations are not required
	 * to be naturally aligned.
	 */
	if (order > get_order(SZ_1M))
		order = get_order(SZ_1M);

	align = (1 << order) - 1;

	mutex_lock(&dma_mem->lock);
	pageno = bitmap_find_next_zero_area(dma_mem->bitmap, dma_mem->nr_pages,
						0, nbits, align);

	if (pageno < dma_mem->nr_pages) {
		phys_addr_t base = dma_mem->base + pageno * PAGE_SIZE;
		*handle = base;

		bitmap_set(dma_mem->bitmap, pageno, nbits);

		if (dma_mem->fixup) {
			removed_region_fixup(dma_mem, pageno + nbits);
			dma_mem->fixup = 0;
		}

		if (no_kernel_mapping && skip_zeroing) {
			addr = (void *)NO_KERNEL_MAPPING_DUMMY;
			goto out;
		}

		addr = ioremap_wc(base, size);
		if (WARN_ON(!addr)) {
			bitmap_clear(dma_mem->bitmap, pageno, nbits);
		} else {
			if (!skip_zeroing)
				memset_io(addr, 0, size);
			if (no_kernel_mapping) {
				iounmap(addr);
				addr = (void *)NO_KERNEL_MAPPING_DUMMY;
			}
			*handle = base;
		}
	}

out:
	mutex_unlock(&dma_mem->lock);
	return addr;
}

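/* Userspace mapping of removed memory is not supported. */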
int removed_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs)
{
	return -ENXIO;
}

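/*
 * Undo removed_alloc(): drop the kernel mapping (if one was created) and
 * clear the allocation's pages in the bitmap.
 */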
void removed_free(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t handle, unsigned long attrs)
{
	bool no_kernel_mapping = attrs & DMA_ATTR_NO_KERNEL_MAPPING;
	struct removed_region *dma_mem = dev->removed_mem;

	size = PAGE_ALIGN(size);
	if (!no_kernel_mapping)
		iounmap(cpu_addr);
	mutex_lock(&dma_mem->lock);
	bitmap_clear(dma_mem->bitmap, (handle - dma_mem->base) >> PAGE_SHIFT,
			size >> PAGE_SHIFT);
	mutex_unlock(&dma_mem->lock);
}

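/*
 * Streaming DMA is meaningless for removed memory: map_page reports
 * failure with an all-ones handle, map_sg maps nothing, and the unmap and
 * sync hooks are no-ops.
 */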
static dma_addr_t removed_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	return ~(dma_addr_t)0;
}

static void removed_unmap_page(struct device *dev, dma_addr_t dma_handle,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
}

static int removed_map_sg(struct device *dev, struct scatterlist *sg,
			  int nents, enum dma_data_direction dir,
			  unsigned long attrs)
{
	return 0;
}

static void removed_unmap_sg(struct device *dev,
			     struct scatterlist *sg, int nents,
			     enum dma_data_direction dir,
			     unsigned long attrs)
{
}

static void removed_sync_single_for_cpu(struct device *dev,
					dma_addr_t dma_handle, size_t size,
					enum dma_data_direction dir)
{
}

void removed_sync_single_for_device(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction dir)
{
}

void removed_sync_sg_for_cpu(struct device *dev,
			     struct scatterlist *sg, int nents,
			     enum dma_data_direction dir)
{
}

void removed_sync_sg_for_device(struct device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction dir)
{
}

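/* Re-establish / tear down a CPU mapping of an existing allocation. */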
static void __iomem *removed_remap(struct device *dev, void *cpu_addr,
			dma_addr_t handle, size_t size, unsigned long attrs)
{
	return ioremap_wc(handle, size);
}

void removed_unremap(struct device *dev, void *remapped_address, size_t size)
{
	iounmap(remapped_address);
}

const struct dma_map_ops removed_dma_ops = {
	.alloc = removed_alloc,
	.free = removed_free,
	.mmap = removed_mmap,
	.map_page = removed_map_page,
	.unmap_page = removed_unmap_page,
	.map_sg = removed_map_sg,
	.unmap_sg = removed_unmap_sg,
	.sync_single_for_cpu = removed_sync_single_for_cpu,
	.sync_single_for_device = removed_sync_single_for_device,
	.sync_sg_for_cpu = removed_sync_sg_for_cpu,
	.sync_sg_for_device = removed_sync_sg_for_device,
	.remap = removed_remap,
	.unremap = removed_unremap,
};
EXPORT_SYMBOL(removed_dma_ops);

#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

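/*
 * reserved_mem device_init hook: lazily build the region bookkeeping the
 * first time a device attaches, then point the device at removed_dma_ops.
 */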
static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	struct removed_region *mem = rmem->priv;

	if (!mem && dma_init_removed_memory(rmem->base, rmem->size, &mem)) {
		pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
			&rmem->base, (unsigned long)rmem->size / SZ_1M);
		return -EINVAL;
	}
	mem->fixup = rmem->fixup;
	set_dma_ops(dev, &removed_dma_ops);
	rmem->priv = mem;
	dma_assign_removed_region(dev, mem);
	return 0;
}

static void rmem_dma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	/* clear the region assigned by rmem_dma_device_init() */
	dev->removed_mem = NULL;
}

static const struct reserved_mem_ops removed_mem_ops = {
	.device_init = rmem_dma_device_init,
	.device_release = rmem_dma_device_release,
};

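/*
 * Early FDT hook for "removed-dma-pool" reservations: "no-map" regions are
 * left unmapped from the start, while "no-map-fixup" regions keep their
 * mapping until the first allocation trims them; the two properties are
 * mutually exclusive.
 */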
418static int __init removed_dma_setup(struct reserved_mem *rmem)
419{
Patrick Dalyb5520a52016-08-17 15:43:08 -0700420 unsigned long node = rmem->fdt_node;
421 int nomap, fixup;
422
423 nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
424 fixup = of_get_flat_dt_prop(node, "no-map-fixup", NULL) != NULL;
425
426 if (nomap && fixup) {
427 pr_err("Removed memory: nomap & nomap-fixup can't co-exist\n");
428 return -EINVAL;
429 }
430
431 rmem->fixup = fixup;
432 if (rmem->fixup) {
433 /* Architecture specific contiguous memory fixup only for
434 * no-map-fixup to split mappings
435 */
436 dma_contiguous_early_fixup(rmem->base, rmem->size);
437 }
438
Laura Abbottf9716072014-08-05 18:47:54 -0700439 rmem->ops = &removed_mem_ops;
440 pr_info("Removed memory: created DMA memory pool at %pa, size %ld MiB\n",
441 &rmem->base, (unsigned long)rmem->size / SZ_1M);
442 return 0;
443}
444RESERVEDMEM_OF_DECLARE(dma, "removed-dma-pool", removed_dma_setup);
445#endif