/*
 * arch/sh/mm/consistent.c
 *
 * Copyright (C) 2004 - 2007  Paul Mundt
 *
 * Declared coherent memory functions based on arch/x86/kernel/pci-dma_32.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <asm/cacheflush.h>
#include <asm/addrspace.h>

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

const struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

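/*
 * Set up dma-debug early (at fs_initcall time) so that mappings are
 * tracked from the first driver allocations onwards.
 */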
static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_init);

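/*
 * Allocate a coherent buffer: grab zeroed pages from the page
 * allocator, flush any stale cache lines back to memory, then return
 * an uncached ioremap() view of the same physical pages.  The cached
 * kernel address can be recovered from *dma_handle at free time.
 */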
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp,
				 unsigned long attrs)
{
	void *ret, *ret_nocache;
	int order = get_order(size);

	gfp |= __GFP_ZERO;

	ret = (void *)__get_free_pages(gfp, order);
	if (!ret)
		return NULL;

	/*
	 * Pages from the page allocator may have data present in
	 * cache. So flush the cache before using uncached memory.
	 */
	sh_sync_dma_for_device(ret, size, DMA_BIDIRECTIONAL);

	ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
	if (!ret_nocache) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}

	split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order);

	*dma_handle = virt_to_phys(ret);

	return ret_nocache;
}

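/*
 * Free a buffer from dma_generic_alloc_coherent(): split_page() left
 * the allocation as individual order-0 pages, so release them one by
 * one, then tear down the uncached mapping.
 */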
void dma_generic_free_coherent(struct device *dev, size_t size,
			       void *vaddr, dma_addr_t dma_handle,
			       unsigned long attrs)
{
	int order = get_order(size);
	unsigned long pfn = dma_handle >> PAGE_SHIFT;
	int k;

	for (k = 0; k < (1 << order); k++)
		__free_pages(pfn_to_page(pfn + k), 0);

	iounmap(vaddr);
}

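/*
 * Write back and/or invalidate the cache lines covering a DMA buffer,
 * according to the transfer direction.  In 29-bit mode the caller may
 * hold an uncached (P2) alias, so convert back to the cached (P1)
 * address first: cache operations need the cached view.
 */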
void sh_sync_dma_for_device(void *vaddr, size_t size,
			    enum dma_data_direction direction)
{
	void *addr;

	addr = __in_29bit_mode() ?
	       (void *)CAC_ADDR((unsigned long)vaddr) : vaddr;

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		__flush_invalidate_region(addr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		__flush_wback_region(addr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		__flush_purge_region(addr, size);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(sh_sync_dma_for_device);

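/*
 * Per-device memory chunk sizes can be overridden on the kernel
 * command line with "memchunk.<name>=<size>", e.g. "memchunk.vpu=8m"
 * (the "vpu" name is illustrative; valid names come from the platform
 * devices registered by the board code).  The __setup() handler only
 * claims the prefix; parsing happens in memchunk_cmdline_override().
 */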
static int __init memchunk_setup(char *str)
{
	return 1; /* accept anything that begins with "memchunk." */
}
__setup("memchunk.", memchunk_setup);

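/*
 * Scan boot_command_line for "memchunk.<name>=" and, if present,
 * replace *sizep with the user-specified size.
 */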
static void __init memchunk_cmdline_override(char *name, unsigned long *sizep)
{
	char *p = boot_command_line;
	int k = strlen(name);

	while ((p = strstr(p, "memchunk."))) {
		p += 9; /* strlen("memchunk.") */
		if (!strncmp(name, p, k) && p[k] == '=') {
			p += k + 1;
			*sizep = memparse(p, NULL);
			pr_info("%s: forcing memory chunk size to 0x%08lx\n",
				name, *sizep);
			break;
		}
	}
}

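/*
 * Reserve a physically contiguous, coherent buffer for a platform
 * device and publish it through the device's last (empty) resource
 * slot as an IORESOURCE_MEM range.  A memsize of zero (possibly
 * forced via "memchunk.<name>=0") skips the allocation entirely.
 */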
int __init platform_resource_setup_memory(struct platform_device *pdev,
					  char *name, unsigned long memsize)
{
	struct resource *r;
	dma_addr_t dma_handle;
	void *buf;

	r = pdev->resource + pdev->num_resources - 1;
	if (r->flags) {
		pr_warning("%s: unable to find empty space for resource\n",
			name);
		return -EINVAL;
	}

	memchunk_cmdline_override(name, &memsize);
	if (!memsize)
		return 0;

	buf = dma_alloc_coherent(NULL, memsize, &dma_handle, GFP_KERNEL);
	if (!buf) {
		pr_warning("%s: unable to allocate memory\n", name);
		return -ENOMEM;
	}

	memset(buf, 0, memsize);

	r->flags = IORESOURCE_MEM;
	r->start = dma_handle;
	r->end = r->start + memsize - 1;
	r->name = name;
	return 0;
}