/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * DMA Coherent API Notes
 *
 * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
 * implemented by accessing it using a kernel virtual address, with
 * the Cache bit off in the TLB entry.
 *
 * The default DMA address == Phy address which is 0x8000_0000 based.
 */

#include <linux/dma-mapping.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>

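/*
 * Allocate a "coherent" DMA buffer: grab pages, advertise their physical
 * address as the bus address, and hand back either the cached linear
 * mapping (IOC or DMA_ATTR_NON_CONSISTENT) or an uncached ioremap'd
 * mapping of the same pages.
 */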
static void *arc_dma_alloc(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
        unsigned long order = get_order(size);
        struct page *page;
        phys_addr_t paddr;
        void *kvaddr;

        page = alloc_pages(gfp, order);
        if (!page)
                return NULL;

        /* This is linear addr (0x8000_0000 based) */
        paddr = page_to_phys(page);

        /* For now bus address is exactly same as paddr */
        *dma_handle = paddr;

        /*
         * IOC relies on all data (even coherent DMA data) being in cache
         * Thus allocate normal cached memory
         *
         * The gains with IOC are two pronged:
         *   -For streaming data, elides the need for cache maintenance, saving
         *    cycles in flush code and bus bandwidth, as all the lines of a
         *    buffer would otherwise need to be flushed out to memory
         *   -For coherent data, Read/Write to buffers terminate early in cache
         *    (vs. always going to memory - thus are faster)
         */
        if ((is_isa_arcv2() && ioc_exists) ||
            dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
                return page_address(page);

        /* This is kernel Virtual address (0x7000_0000 based) */
        kvaddr = ioremap_nocache((unsigned long)paddr, size);
        if (kvaddr == NULL) {
                __free_pages(page, order);
                return NULL;
        }

        /*
         * Evict any existing L1 and/or L2 lines for the backing page
         * in case it was used earlier as a normal "cached" page.
         * Yeah this bit us - STAR 9000898266
         *
         * Although the core does call flush_cache_vmap(), it gets kvaddr hence
         * can't be used to efficiently flush L1 and/or L2 which need paddr.
         * Currently flush_cache_vmap nukes the L1 cache completely which
         * will be optimized as a separate commit
         */
        dma_cache_wback_inv((unsigned long)paddr, size);

        return kvaddr;
}

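/*
 * Undo arc_dma_alloc(): drop the uncached mapping if one was created,
 * then release the backing pages.
 */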
static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
                dma_addr_t dma_handle, struct dma_attrs *attrs)
{
        struct page *page = virt_to_page(dma_handle);

        if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) &&
            !(is_isa_arcv2() && ioc_exists))
                iounmap((void __force __iomem *)vaddr);

        __free_pages(page, get_order(size));
}

/*
 * streaming DMA Mapping API...
 * CPU accesses page via normal paddr, thus it needs to be explicitly made
 * consistent before each use
 */
static void _dma_cache_sync(unsigned long paddr, size_t size,
                enum dma_data_direction dir)
{
        switch (dir) {
        case DMA_FROM_DEVICE:
                dma_cache_inv(paddr, size);
                break;
        case DMA_TO_DEVICE:
                dma_cache_wback(paddr, size);
                break;
        case DMA_BIDIRECTIONAL:
                dma_cache_wback_inv(paddr, size);
                break;
        default:
                pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr);
        }
}

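/*
 * Map a single page for streaming DMA: do the cache maintenance implied
 * by @dir and return the physical address as the bus address.
 */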
static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir,
                struct dma_attrs *attrs)
{
        unsigned long paddr = page_to_phys(page) + offset;

        _dma_cache_sync(paddr, size, dir);
        return (dma_addr_t)paddr;
}

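/* Map each scatterlist segment individually via dma_map_page() */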
static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
           int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i)
                s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
                                              s->length, dir);

        return nents;
}

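/* Make the buffer visible to the CPU: invalidate any stale cache lines */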
static void arc_dma_sync_single_for_cpu(struct device *dev,
                dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
        _dma_cache_sync(dma_handle, size, DMA_FROM_DEVICE);
}

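/* Hand the buffer to the device: write back any dirty cache lines */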
static void arc_dma_sync_single_for_device(struct device *dev,
                dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
        _dma_cache_sync(dma_handle, size, DMA_TO_DEVICE);
}

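/* Scatterlist variants: walk the list and sync each segment for @dir */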
static void arc_dma_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sglist, int nelems,
                enum dma_data_direction dir)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sglist, sg, nelems, i)
                _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
}

static void arc_dma_sync_sg_for_device(struct device *dev,
                struct scatterlist *sglist, int nelems,
                enum dma_data_direction dir)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sglist, sg, nelems, i)
                _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
}

static int arc_dma_supported(struct device *dev, u64 dma_mask)
{
        /* Support 32 bit DMA mask exclusively */
        return dma_mask == DMA_BIT_MASK(32);
}

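/* Wire the above routines into the generic DMA mapping framework */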
struct dma_map_ops arc_dma_ops = {
        .alloc                  = arc_dma_alloc,
        .free                   = arc_dma_free,
        .map_page               = arc_dma_map_page,
        .map_sg                 = arc_dma_map_sg,
        .sync_single_for_device = arc_dma_sync_single_for_device,
        .sync_single_for_cpu    = arc_dma_sync_single_for_cpu,
        .sync_sg_for_cpu        = arc_dma_sync_sg_for_cpu,
        .sync_sg_for_device     = arc_dma_sync_sg_for_device,
        .dma_supported          = arc_dma_supported,
};
EXPORT_SYMBOL(arc_dma_ops);