/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * DMA Coherent API Notes
 *
 * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
 * implemented by accessing it using a kernel virtual address, with
 * the Cache bit off in the TLB entry.
 *
 * The default DMA address == phys address, which is 0x8000_0000 based.
 */
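
/*
 * A minimal sketch of the driver-side view of the coherent API served by
 * this file (the device register and buffer size are hypothetical):
 *
 *	void *kvaddr;
 *	dma_addr_t handle;
 *
 *	kvaddr = dma_alloc_coherent(dev, SZ_4K, &handle, GFP_KERNEL);
 *	if (!kvaddr)
 *		return -ENOMEM;
 *	writel(handle, base + RING_BASE_REG);	// hypothetical register
 *	// CPU stores via kvaddr are visible to the device with no
 *	// explicit sync: the mapping is uncached (or IOC-snooped)
 *	...
 *	dma_free_coherent(dev, SZ_4K, kvaddr, handle);
 */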

#include <linux/dma-mapping.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>


static void *arc_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	unsigned long order = get_order(size);
	struct page *page;
	phys_addr_t paddr;
	void *kvaddr;
	int need_coh = 1, need_kvaddr = 0;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * IOC relies on all data (even coherent DMA data) being in cache.
	 * Thus allocate normal cached memory.
	 *
	 * The gains with IOC are two-pronged:
	 *   -For streaming data, it elides the need for cache maintenance,
	 *    saving cycles in the flush code and bus bandwidth, since all
	 *    the lines of a buffer would otherwise need to be flushed out
	 *    to memory.
	 *   -For coherent data, reads/writes to buffers terminate early in
	 *    the cache (vs. always going to memory) and are thus faster.
	 */
	if ((is_isa_arcv2() && ioc_exists) ||
	    (attrs & DMA_ATTR_NON_CONSISTENT))
		need_coh = 0;

	/*
	 * - A coherent buffer needs an MMU mapping to enforce
	 *   non-cacheability
	 * - A highmem page needs a virtual handle (hence an MMU mapping),
	 *   independent of cacheability
	 */
	if (PageHighMem(page) || need_coh)
		need_kvaddr = 1;

	/* This is the linear addr (0x8000_0000 based) */
	paddr = page_to_phys(page);

	*dma_handle = plat_phys_to_dma(dev, paddr);

	/* This is the kernel virtual address (0x7000_0000 based) */
	if (need_kvaddr) {
		kvaddr = ioremap_nocache(paddr, size);
		if (kvaddr == NULL) {
			__free_pages(page, order);
			return NULL;
		}
	} else {
		kvaddr = (void *)(u32)paddr;
	}

	/*
	 * Evict any existing L1 and/or L2 lines for the backing page
	 * in case it was used earlier as a normal "cached" page.
	 * Yeah this bit us - STAR 9000898266
	 *
	 * Although the core does call flush_cache_vmap(), it gets the
	 * kvaddr, hence it can't be used to efficiently flush the L1
	 * and/or L2, which need the paddr. Currently flush_cache_vmap()
	 * nukes the L1 cache completely, which will be optimized in a
	 * separate commit.
	 */
	if (need_coh)
		dma_cache_wback_inv(paddr, size);

	return kvaddr;
}
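
/*
 * Quick reference, derived from the allocation paths above:
 *
 *	page	coherent?	kvaddr			extra flush
 *	----	---------	------			-----------
 *	lowmem	yes		ioremap_nocache()	wback+inv once
 *	lowmem	no (IOC)	linear paddr reused	none
 *	highmem	either		ioremap_nocache()	only if coherent
 */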

static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	phys_addr_t paddr = plat_dma_to_phys(dev, dma_handle);
	/*
	 * On ARC the lowmem linear map is 1:1 (both 0x8000_0000 based, see
	 * the top-of-file notes), so the paddr doubles as the kernel
	 * virtual address that virt_to_page() expects.
	 */
	struct page *page = virt_to_page(paddr);
	int is_non_coh = 1;

	is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||
			(is_isa_arcv2() && ioc_exists);

	if (PageHighMem(page) || !is_non_coh)
		iounmap((void __force __iomem *)vaddr);

	__free_pages(page, get_order(size));
}

/*
 * Streaming DMA mapping API...
 * The CPU accesses the page via its normal paddr, so it needs to be
 * explicitly made consistent before each use.
 */
static void _dma_cache_sync(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;
	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;
	default:
		pr_err("Invalid DMA dir [%d] for OP @ %pa[p]\n", dir, &paddr);
	}
}
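
/*
 * A minimal sketch of the driver-side streaming sequence these hooks
 * serve (buf and len are hypothetical driver state):
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	// device DMAs into buf ...
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	// CPU can now read buf: stale cache lines were invalidated
 *	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
 */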

static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t paddr = page_to_phys(page) + offset;
	_dma_cache_sync(paddr, size, dir);
	return plat_phys_to_dma(dev, paddr);
}

static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
					      s->length, dir);

	return nents;
}

/*
 * Note: dir is ignored below - the for_cpu hook always invalidates
 * (DMA_FROM_DEVICE semantics) and the for_device hook always writes
 * back (DMA_TO_DEVICE semantics).
 */
static void arc_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	_dma_cache_sync(plat_dma_to_phys(dev, dma_handle), size, DMA_FROM_DEVICE);
}

static void arc_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	_dma_cache_sync(plat_dma_to_phys(dev, dma_handle), size, DMA_TO_DEVICE);
}

static void arc_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sglist, int nelems,
		enum dma_data_direction dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i)
		_dma_cache_sync(sg_phys(sg), sg->length, dir);
}

static void arc_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sglist, int nelems,
		enum dma_data_direction dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i)
		_dma_cache_sync(sg_phys(sg), sg->length, dir);
}

static int arc_dma_supported(struct device *dev, u64 dma_mask)
{
	/* Support 32 bit DMA mask exclusively */
	return dma_mask == DMA_BIT_MASK(32);
}

struct dma_map_ops arc_dma_ops = {
	.alloc = arc_dma_alloc,
	.free = arc_dma_free,
	.map_page = arc_dma_map_page,
	.map_sg = arc_dma_map_sg,
	.sync_single_for_device = arc_dma_sync_single_for_device,
	.sync_single_for_cpu = arc_dma_sync_single_for_cpu,
	.sync_sg_for_cpu = arc_dma_sync_sg_for_cpu,
	.sync_sg_for_device = arc_dma_sync_sg_for_device,
	.dma_supported = arc_dma_supported,
};
EXPORT_SYMBOL(arc_dma_ops);
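
/*
 * This table is what the generic DMA API dispatches to on ARC: the
 * arch's asm/dma-mapping.h is expected to hand it back from
 * get_arch_dma_ops(), so e.g. dma_alloc_coherent() lands in
 * arc_dma_alloc() above.
 */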