/* MN10300 Dynamic DMA mapping support
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 * Derived from: arch/i386/kernel/pci-dma.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>	/* struct dma_map_ops, dma_addr_t */
#include <linux/scatterlist.h>	/* for_each_sg(), sg_page(), sg_phys() */
#include <asm/io.h>
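
/*
 * Bump allocator for the PCI SRAM window: buffers are carved out
 * linearly from 0xbc000000 up to the 0xbe000000 limit checked in
 * mn10300_dma_alloc() below, and are never handed back
 * (mn10300_dma_free() deliberately ignores them).
 */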
static unsigned long pci_sram_allocated = 0xbc000000;

static void *mn10300_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	unsigned long addr;
	void *ret;

	pr_debug("dma_alloc_coherent(%s,%zu,%x)\n",
		 dev ? dev_name(dev) : "?", size, gfp);
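
	/*
	 * Fast path: carve the buffer out of the PCI SRAM window while
	 * space remains.  Each request is rounded up to a 256-byte
	 * multiple so that the next carve-out stays 256-byte aligned,
	 * e.g. a 100-byte request consumes (100 + 255) & ~255 = 256
	 * bytes of the window.
	 */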
	if (0xbe000000 - pci_sram_allocated >= size) {
		size = (size + 255) & ~255;
		addr = pci_sram_allocated;
		pci_sram_allocated += size;
		ret = (void *) addr;
		goto done;
	}
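
	/*
	 * Otherwise fall back to the page allocator.  A device that
	 * cannot address the full 32-bit space must be fed from
	 * ZONE_DMA so the buffer lands within its coherent DMA mask.
	 */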
	if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
		gfp |= GFP_DMA;

	addr = __get_free_pages(gfp, get_order(size));
	if (!addr)
		return NULL;

	/* map the coherent memory through the uncached memory window;
	 * ORing in 0x20000000 yields the uncached alias of the cached
	 * kernel address (mn10300_dma_free() strips the bit again) */
	ret = (void *) (addr | 0x20000000);

	/* fill the memory with obvious rubbish */
	memset((void *) addr, 0xfb, size);

	/* write back and evict all cache lines covering this region */
	mn10300_dcache_flush_inv_range2(virt_to_phys((void *) addr), size);

done:
	*dma_handle = virt_to_bus((void *) addr);
	printk(KERN_DEBUG "dma_alloc_coherent() = %p [%x]\n", ret, *dma_handle);
	return ret;
}

static void mn10300_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
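	/*
	 * Masking off the uncached-window bit recovers the address that
	 * __get_free_pages() returned.  SRAM carve-outs land at
	 * 0x9c000000 and above after masking (0xbc000000 & ~0x20000000);
	 * they never came from the page allocator and are simply
	 * abandoned, as the bump allocator does not reclaim.
	 */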
	unsigned long addr = (unsigned long) vaddr & ~0x20000000;

	if (addr >= 0x9c000000)
		return;

	free_pages(addr, get_order(size));
}

static int mn10300_dma_map_sg(struct device *dev, struct scatterlist *sglist,
		int nents, enum dma_data_direction direction,
		unsigned long attrs)
{
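	/*
	 * There is no IOMMU: a segment's bus address is simply its
	 * physical address.  One whole-cache flush at the end writes
	 * the data back so the device sees it.
	 */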
	struct scatterlist *sg;
	int i;

	for_each_sg(sglist, sg, nents, i) {
		BUG_ON(!sg_page(sg));

		sg->dma_address = sg_phys(sg);
	}

	mn10300_dcache_flush_inv();
	return nents;
}

static dma_addr_t mn10300_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction direction, unsigned long attrs)
{
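	/*
	 * Bus addresses equal physical addresses on this platform (cf.
	 * virt_to_bus() and sg_phys() above).  Unlike map_sg(), this
	 * path does no cache flush of its own; write-back is left to
	 * the sync_*_for_device hooks below.
	 */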
	return page_to_bus(page) + offset;
}
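
/*
 * Neither sync hook attempts a ranged flush: mn10300_dcache_flush_inv()
 * writes back and invalidates the whole data cache, which covers the
 * buffer wherever it lives, at the cost of also evicting unrelated lines.
 */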
static void mn10300_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	mn10300_dcache_flush_inv();
}

static void mn10300_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems,
		enum dma_data_direction direction)
{
	mn10300_dcache_flush_inv();
}

static int mn10300_dma_supported(struct device *dev, u64 mask)
{
	/*
	 * we fall back to GFP_DMA when the mask isn't all 1s, so we can't
	 * guarantee allocations that must be within a tighter range than
	 * GFP_DMA
	 */
	if (mask < 0x00ffffff)
		return 0;
	return 1;
}
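
/*
 * Method table consumed by the generic DMA API.  Hooks left NULL here
 * (unmap_page, unmap_sg, the sync_*_for_cpu pair) are optional: the
 * core dma-mapping wrappers skip them, which amounts to a no-op.
 */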
const struct dma_map_ops mn10300_dma_ops = {
	.alloc			= mn10300_dma_alloc,
	.free			= mn10300_dma_free,
	.map_page		= mn10300_dma_map_page,
	.map_sg			= mn10300_dma_map_sg,
	.sync_single_for_device	= mn10300_dma_sync_single_for_device,
	.sync_sg_for_device	= mn10300_dma_sync_sg_for_device,
	.dma_supported		= mn10300_dma_supported,
};
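
/*
 * Usage sketch (hypothetical driver code): nothing calls these hooks
 * directly.  A driver goes through the generic DMA API, which
 * dispatches to mn10300_dma_ops, e.g.:
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(&pdev->dev, 4096, &handle,
 *				       GFP_KERNEL);
 *	if (buf) {
 *		... device DMA uses 'handle', CPU access uses 'buf' ...
 *		dma_free_coherent(&pdev->dev, 4096, buf, handle);
 *	}
 */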