/*
 * arch/sh/mm/consistent.c
 *
 * Copyright (C) 2004 - 2007 Paul Mundt
 *
 * Declared coherent memory functions based on arch/x86/kernel/pci-dma_32.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/io.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/addrspace.h>

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	no_iommu_init();
	return 0;
}
fs_initcall(dma_init);

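/*
 * Allocate cached pages, flush them out of the data cache, and hand the
 * caller an uncached ioremap_nocache() view of the same physical memory.
 * split_page() breaks the high-order allocation into individual pages so
 * that dma_free_coherent() can release them one at a time.
 */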
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret, *ret_nocache;
	int order = get_order(size);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	ret = (void *)__get_free_pages(gfp, order);
	if (!ret)
		return NULL;

	memset(ret, 0, size);
	/*
	 * Pages from the page allocator may have data present in
	 * cache. So flush the cache before using uncached memory.
	 */
	dma_cache_sync(dev, ret, size, DMA_BIDIRECTIONAL);

	ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
	if (!ret_nocache) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}

	split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order);

	*dma_handle = virt_to_phys(ret);

	debug_dma_alloc_coherent(dev, size, *dma_handle, ret_nocache);

	return ret_nocache;
}
EXPORT_SYMBOL(dma_alloc_coherent);

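/*
 * Undo dma_alloc_coherent(): give any per-device coherent pool first
 * refusal, then free each page of the original cached allocation and
 * tear down the uncached mapping that was handed to the caller.
 */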
void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle)
{
	int order = get_order(size);
	unsigned long pfn = dma_handle >> PAGE_SHIFT;
	int k;

	WARN_ON(irqs_disabled());	/* for portability */

	if (dma_release_from_coherent(dev, order, vaddr))
		return;

	debug_dma_free_coherent(dev, size, vaddr, dma_handle);
	for (k = 0; k < (1 << order); k++)
		__free_pages(pfn_to_page(pfn + k), 0);
	iounmap(vaddr);
}
EXPORT_SYMBOL(dma_free_coherent);

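/*
 * Writeback and/or invalidate the region according to the DMA
 * direction. On 29-bit parts the flush has to go through the cached P1
 * segment alias, hence the P1SEGADDR() conversion; SH-5 and PMB (32-bit
 * mode) configurations operate on the virtual address directly.
 */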
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
#if defined(CONFIG_CPU_SH5) || defined(CONFIG_PMB)
	void *p1addr = vaddr;
#else
	void *p1addr = (void *)P1SEGADDR((unsigned long)vaddr);
#endif

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		__flush_invalidate_region(p1addr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		__flush_wback_region(p1addr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		__flush_purge_region(p1addr, size);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(dma_cache_sync);

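/*
 * "memchunk.<name>=<size>" command line entries let the memory reserved
 * for a given platform device be resized, or disabled with a size of 0,
 * at boot (e.g. an illustrative "memchunk.foo=8m"). The __setup()
 * handler merely claims the prefix; the actual parsing is done by
 * memchunk_cmdline_override() below.
 */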
static int __init memchunk_setup(char *str)
{
	return 1; /* accept anything that begins with "memchunk." */
}
__setup("memchunk.", memchunk_setup);

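/*
 * Scan boot_command_line for "memchunk.<name>=" and, if found, override
 * *sizep with the memparse()'d value (k/m/g suffixes are accepted).
 */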
static void __init memchunk_cmdline_override(char *name, unsigned long *sizep)
{
	char *p = boot_command_line;
	int k = strlen(name);

	while ((p = strstr(p, "memchunk."))) {
		p += 9; /* strlen("memchunk.") */
		if (!strncmp(name, p, k) && p[k] == '=') {
			p += k + 1;
			*sizep = memparse(p, NULL);
			pr_info("%s: forcing memory chunk size to 0x%08lx\n",
				name, *sizep);
			break;
		}
	}
}

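/*
 * Back the last resource slot of a platform device, which must have been
 * left empty (flags == 0), with a freshly allocated coherent buffer of
 * memsize bytes, unless a "memchunk." command line override changed or
 * zeroed the size. Returns 0 on success or when the chunk was disabled.
 */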
int __init platform_resource_setup_memory(struct platform_device *pdev,
					  char *name, unsigned long memsize)
{
	struct resource *r;
	dma_addr_t dma_handle;
	void *buf;

	r = pdev->resource + pdev->num_resources - 1;
	if (r->flags) {
		pr_warning("%s: unable to find empty space for resource\n",
			   name);
		return -EINVAL;
	}

	memchunk_cmdline_override(name, &memsize);
	if (!memsize)
		return 0;

	buf = dma_alloc_coherent(NULL, memsize, &dma_handle, GFP_KERNEL);
	if (!buf) {
		pr_warning("%s: unable to allocate memory\n", name);
		return -ENOMEM;
	}

	memset(buf, 0, memsize);

	r->flags = IORESOURCE_MEM;
	r->start = dma_handle;
	r->end = r->start + memsize - 1;
	r->name = name;
	return 0;
}
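
/*
 * Usage sketch (illustrative only, not part of this file): a board setup
 * file reserves a trailing zeroed resource slot for the DMA chunk and
 * sizes it before registering the device. The "foo" device, its MMIO
 * window, and the 1 MiB default below are all hypothetical.
 *
 *	static struct resource foo_resources[] = {
 *		[0] = {
 *			.start	= 0xfe900000,	// register window (example)
 *			.end	= 0xfe9000ff,
 *			.flags	= IORESOURCE_MEM,
 *		},
 *		[1] = {},	// filled in by platform_resource_setup_memory()
 *	};
 *
 *	static struct platform_device foo_device = {
 *		.name		= "foo",
 *		.id		= -1,
 *		.resource	= foo_resources,
 *		.num_resources	= ARRAY_SIZE(foo_resources),
 *	};
 *
 *	static int __init foo_devices_setup(void)
 *	{
 *		platform_resource_setup_memory(&foo_device, "foo", 1 << 20);
 *		return platform_device_register(&foo_device);
 *	}
 *	device_initcall(foo_devices_setup);
 *
 * Booting with "memchunk.foo=2m" would then double the reservation,
 * while "memchunk.foo=0" would skip it entirely.
 */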