/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org>
 * Copyright (C) 2005 Ilya A. Volynets-Evenbakh <ilya@total-knowledge.com>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 * IP32 changes by Ilya.
 * Copyright (C) 2010 Cavium Networks, Inc.
 */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/swiotlb.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mm.h>

#include <asm/bootinfo.h>

#include <asm/octeon/octeon.h>

#ifdef CONFIG_PCI
#include <asm/octeon/pci-octeon.h>
#include <asm/octeon/cvmx-npi-defs.h>
#include <asm/octeon/cvmx-pci-defs.h>

static dma_addr_t octeon_hole_phys_to_dma(phys_addr_t paddr)
{
	if (paddr >= CVMX_PCIE_BAR1_PHYS_BASE &&
	    paddr < (CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_PHYS_SIZE))
		return paddr - CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_RC_BASE;
	else
		return paddr;
}

static phys_addr_t octeon_hole_dma_to_phys(dma_addr_t daddr)
{
	if (daddr >= CVMX_PCIE_BAR1_RC_BASE)
		return daddr + CVMX_PCIE_BAR1_PHYS_BASE - CVMX_PCIE_BAR1_RC_BASE;
	else
		return daddr;
}
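/*
 * octeon_hole_phys_to_dma() and octeon_hole_dma_to_phys() are intended
 * to be inverses (the CVMX_PCIE_BAR1_* constants are SoC-specific):
 * for example, a paddr of CVMX_PCIE_BAR1_PHYS_BASE + 0x1000 becomes
 * CVMX_PCIE_BAR1_RC_BASE + 0x1000, and converting that result back
 * yields the original paddr.  Addresses outside the BAR1 window pass
 * through unchanged in both directions.
 */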

static dma_addr_t octeon_gen1_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
		paddr -= 0x400000000ull;
	return octeon_hole_phys_to_dma(paddr);
}

static phys_addr_t octeon_gen1_dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	daddr = octeon_hole_dma_to_phys(daddr);

	if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
		daddr += 0x400000000ull;

	return daddr;
}
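/*
 * The 0x410000000-0x420000000 physical range is where RAM that would
 * otherwise sit in the 0x10000000-0x20000000 I/O hole is placed, so on
 * this generation it is mapped low before the BAR1 hole translation is
 * applied, and octeon_gen1_dma_to_phys() undoes the two steps in
 * reverse order.
 */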

static dma_addr_t octeon_gen2_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return octeon_hole_phys_to_dma(paddr);
}

static phys_addr_t octeon_gen2_dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return octeon_hole_dma_to_phys(daddr);
}
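/*
 * The gen2 (OCTEON_DMA_BAR_TYPE_PCIE2) variants need no low aliasing;
 * only the BAR1 hole translation applies.
 */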

static dma_addr_t octeon_big_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
		paddr -= 0x400000000ull;

	/* Anything in the BAR1 hole or above goes via BAR2 */
	if (paddr >= 0xf0000000ull)
		paddr = OCTEON_BAR2_PCI_ADDRESS + paddr;

	return paddr;
}

static phys_addr_t octeon_big_dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	if (daddr >= OCTEON_BAR2_PCI_ADDRESS)
		daddr -= OCTEON_BAR2_PCI_ADDRESS;

	if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
		daddr += 0x400000000ull;
	return daddr;
}
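/*
 * Example for the "big" BAR layout: addresses below 0xf0000000 are
 * reached through BAR1 unchanged, while anything in the BAR1 hole or
 * above is offset by OCTEON_BAR2_PCI_ADDRESS and reached through BAR2;
 * octeon_big_dma_to_phys() strips that offset again before restoring
 * the low alias.
 */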

static dma_addr_t octeon_small_phys_to_dma(struct device *dev,
					   phys_addr_t paddr)
{
	if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
		paddr -= 0x400000000ull;

	/* Anything not in the BAR1 range goes via BAR2 */
	if (paddr >= octeon_bar1_pci_phys &&
	    paddr < octeon_bar1_pci_phys + 0x8000000ull)
		paddr = paddr - octeon_bar1_pci_phys;
	else
		paddr = OCTEON_BAR2_PCI_ADDRESS + paddr;

	return paddr;
}

static phys_addr_t octeon_small_dma_to_phys(struct device *dev,
					    dma_addr_t daddr)
{
	if (daddr >= OCTEON_BAR2_PCI_ADDRESS)
		daddr -= OCTEON_BAR2_PCI_ADDRESS;
	else
		daddr += octeon_bar1_pci_phys;

	if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
		daddr += 0x400000000ull;
	return daddr;
}
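/*
 * In the "small" BAR layout only a 128MB (0x8000000 byte) window
 * starting at octeon_bar1_pci_phys is visible through BAR1, mapping to
 * bus address 0; everything else takes the OCTEON_BAR2_PCI_ADDRESS
 * offset and goes through BAR2, mirroring the "big" case above.
 */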

#endif /* CONFIG_PCI */

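/*
 * Each wrapper below issues a full memory barrier after the swiotlb
 * operation so that any bounce-buffer writes are visible to the device
 * before it is told to start DMA; map_page, map_sg and the
 * sync_*_for_device hooks all follow this pattern.
 */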
static dma_addr_t octeon_dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction,
	struct dma_attrs *attrs)
{
	dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size,
					    direction, attrs);
	mb();

	return daddr;
}

static int octeon_dma_map_sg(struct device *dev, struct scatterlist *sg,
	int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
{
	int r = swiotlb_map_sg_attrs(dev, sg, nents, direction, attrs);
	mb();
	return r;
}

static void octeon_dma_sync_single_for_device(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
	mb();
}

static void octeon_dma_sync_sg_for_device(struct device *dev,
	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
{
	swiotlb_sync_sg_for_device(dev, sg, nelems, direction);
	mb();
}

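/*
 * octeon_dma_alloc_coherent() picks a GFP zone by falling through the
 * preprocessor-guarded if/else chain: a coherent_dma_mask of 24 bits
 * or less selects __GFP_DMA, one of 32 bits or less selects
 * __GFP_DMA32, and a device that can address all memory gets neither
 * flag.  The lone semicolon keeps the trailing else well-formed when
 * one or both zone config options are disabled.
 */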
static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
	void *ret;

	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ZONE_DMA
	if (dev == NULL)
		gfp |= __GFP_DMA;
	else if (dev->coherent_dma_mask <= DMA_BIT_MASK(24))
		gfp |= __GFP_DMA;
	else
#endif
#ifdef CONFIG_ZONE_DMA32
	if (dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		gfp |= __GFP_DMA32;
	else
#endif
		;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	ret = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);

	mb();

	return ret;
}

static void octeon_dma_free_coherent(struct device *dev, size_t size,
	void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	int order = get_order(size);

	if (dma_release_from_coherent(dev, order, vaddr))
		return;

	swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}

static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr;
}

static phys_addr_t octeon_unity_dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr;
}
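/*
 * The identity pair above backs octeon_linear_dma_map_ops below, the
 * default ops installed by plat_swiotlb_setup(): for on-chip (non-PCI)
 * bus masters a DMA address is simply the physical address.
 */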

struct octeon_dma_map_ops {
	struct dma_map_ops dma_map_ops;
	dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr);
	phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr);
};

dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
						      struct octeon_dma_map_ops,
						      dma_map_ops);

	return ops->phys_to_dma(dev, paddr);
}
EXPORT_SYMBOL(phys_to_dma);

phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
						      struct octeon_dma_map_ops,
						      dma_map_ops);

	return ops->dma_to_phys(dev, daddr);
}
EXPORT_SYMBOL(dma_to_phys);
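/*
 * get_dma_ops(dev) returns a pointer to the dma_map_ops embedded at
 * the start of an octeon_dma_map_ops, so container_of() recovers the
 * enclosing structure and with it the translation callbacks; the
 * generic phys_to_dma()/dma_to_phys() interface is thereby routed to
 * whichever octeon_*_phys_to_dma()/octeon_*_dma_to_phys() pair was
 * selected for the device's bus.
 */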

static struct octeon_dma_map_ops octeon_linear_dma_map_ops = {
	.dma_map_ops = {
		.alloc = octeon_dma_alloc_coherent,
		.free = octeon_dma_free_coherent,
		.map_page = octeon_dma_map_page,
		.unmap_page = swiotlb_unmap_page,
		.map_sg = octeon_dma_map_sg,
		.unmap_sg = swiotlb_unmap_sg_attrs,
		.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
		.sync_single_for_device = octeon_dma_sync_single_for_device,
		.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
		.sync_sg_for_device = octeon_dma_sync_sg_for_device,
		.mapping_error = swiotlb_dma_mapping_error,
		.dma_supported = swiotlb_dma_supported
	},
	.phys_to_dma = octeon_unity_phys_to_dma,
	.dma_to_phys = octeon_unity_dma_to_phys
};

char *octeon_swiotlb;

void __init plat_swiotlb_setup(void)
{
	int i;
	phys_t max_addr;
	phys_t addr_size;
	size_t swiotlbsize;
	unsigned long swiotlb_nslabs;

	max_addr = 0;
	addr_size = 0;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		struct boot_mem_map_entry *e = &boot_mem_map.map[i];
		if (e->type != BOOT_MEM_RAM && e->type != BOOT_MEM_INIT_RAM)
			continue;

		/* These addresses map low for PCI. */
		if (e->addr > 0x410000000ull && !OCTEON_IS_MODEL(OCTEON_CN6XXX))
			continue;

		addr_size += e->size;

		if (max_addr < e->addr + e->size)
			max_addr = e->addr + e->size;
	}

	swiotlbsize = PAGE_SIZE;

#ifdef CONFIG_PCI
	/*
	 * For OCTEON_DMA_BAR_TYPE_SMALL, size the iotlb at 1/4 memory
	 * size to a maximum of 64MB
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN31XX)
	    || OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2)) {
		swiotlbsize = addr_size / 4;
		if (swiotlbsize > 64 * (1 << 20))
			swiotlbsize = 64 * (1 << 20);
	} else if (max_addr > 0xf0000000ul) {
		/*
		 * Otherwise only allocate a big iotlb if there is
		 * memory past the BAR1 hole.
		 */
		swiotlbsize = 64 * (1 << 20);
	}
#endif
#ifdef CONFIG_USB_OCTEON_OHCI
	/* OCTEON II ohci is only 32-bit. */
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && max_addr >= 0x100000000ul)
		swiotlbsize = 64 * (1 << 20);
#endif
	swiotlb_nslabs = swiotlbsize >> IO_TLB_SHIFT;
	swiotlb_nslabs = ALIGN(swiotlb_nslabs, IO_TLB_SEGSIZE);
	swiotlbsize = swiotlb_nslabs << IO_TLB_SHIFT;

	octeon_swiotlb = alloc_bootmem_low_pages(swiotlbsize);

	swiotlb_init_with_tbl(octeon_swiotlb, swiotlb_nslabs, 1);

	mips_dma_map_ops = &octeon_linear_dma_map_ops.dma_map_ops;
}
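/*
 * Sizing example: a CN31XX with 512MB of RAM would ask for
 * 512MB / 4 = 128MB, which is clamped to the 64MB cap; the slab count
 * is then rounded to a multiple of IO_TLB_SEGSIZE before the table is
 * handed to swiotlb_init_with_tbl().
 */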

#ifdef CONFIG_PCI
static struct octeon_dma_map_ops _octeon_pci_dma_map_ops = {
	.dma_map_ops = {
		.alloc = octeon_dma_alloc_coherent,
		.free = octeon_dma_free_coherent,
		.map_page = octeon_dma_map_page,
		.unmap_page = swiotlb_unmap_page,
		.map_sg = octeon_dma_map_sg,
		.unmap_sg = swiotlb_unmap_sg_attrs,
		.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
		.sync_single_for_device = octeon_dma_sync_single_for_device,
		.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
		.sync_sg_for_device = octeon_dma_sync_sg_for_device,
		.mapping_error = swiotlb_dma_mapping_error,
		.dma_supported = swiotlb_dma_supported
	},
};

struct dma_map_ops *octeon_pci_dma_map_ops;

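/*
 * Called from the PCI/PCIe setup code: octeon_dma_bar_type selects
 * which translation pair is patched into _octeon_pci_dma_map_ops; an
 * unrecognized BAR type is a hard bug.
 */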
void __init octeon_pci_dma_init(void)
{
	switch (octeon_dma_bar_type) {
	case OCTEON_DMA_BAR_TYPE_PCIE2:
		_octeon_pci_dma_map_ops.phys_to_dma = octeon_gen2_phys_to_dma;
		_octeon_pci_dma_map_ops.dma_to_phys = octeon_gen2_dma_to_phys;
		break;
	case OCTEON_DMA_BAR_TYPE_PCIE:
		_octeon_pci_dma_map_ops.phys_to_dma = octeon_gen1_phys_to_dma;
		_octeon_pci_dma_map_ops.dma_to_phys = octeon_gen1_dma_to_phys;
		break;
	case OCTEON_DMA_BAR_TYPE_BIG:
		_octeon_pci_dma_map_ops.phys_to_dma = octeon_big_phys_to_dma;
		_octeon_pci_dma_map_ops.dma_to_phys = octeon_big_dma_to_phys;
		break;
	case OCTEON_DMA_BAR_TYPE_SMALL:
		_octeon_pci_dma_map_ops.phys_to_dma = octeon_small_phys_to_dma;
		_octeon_pci_dma_map_ops.dma_to_phys = octeon_small_dma_to_phys;
		break;
	default:
		BUG();
	}
	octeon_pci_dma_map_ops = &_octeon_pci_dma_map_ops.dma_map_ops;
}
#endif /* CONFIG_PCI */