/* Fallback functions when the main IOMMU code is not compiled in. This
   code is roughly equivalent to i386. */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/mm.h>

#include <asm/processor.h>
#include <asm/iommu.h>
#include <asm/dma.h>

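/*
 * Verify that a bus address is reachable through the device's DMA mask.
 * Returns 1 if the address/size fit, 0 if they do not (with a warning
 * for devices whose mask is at least 32 bits wide).
 */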
static int
check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
{
	if (hwdev && !dma_capable(hwdev, bus, size)) {
		if (*hwdev->dma_mask >= DMA_BIT_MASK(32))
			printk(KERN_ERR
			    "nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
				name, (long long)bus, size,
				(long long)*hwdev->dma_mask);
		return 0;
	}
	return 1;
}

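/*
 * "Map" a single page for DMA.  Without an IOMMU the bus address is just
 * the page's physical address plus the offset; the only real work is
 * checking that address against the device's DMA mask.
 */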
static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	dma_addr_t bus = page_to_phys(page) + offset;
	WARN_ON(size == 0);
	if (!check_addr("map_single", dev, bus, size))
		return DMA_ERROR_CODE;
	flush_write_buffers();
	return bus;
}

/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA. This is the scatter-gather version of the
 * above pci_map_single interface. Here the scatter-gather list
 * elements are each tagged with the appropriate dma address
 * and length. They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 * DMA address/length pairs than there are SG table elements
 * (for example via virtual mapping capabilities).
 * The routine returns the number of addr/length pairs actually
 * used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir,
			struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		BUG_ON(!sg_page(s));
		s->dma_address = sg_phys(s);
		if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
			return 0;
		s->dma_length = s->length;
	}
	flush_write_buffers();
	return nents;
}

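/*
 * Illustrative driver-side sketch (not part of this file) of the
 * dma_map_sg() usage described above.  "dev", "buffers[]" and
 * program_hw_descriptor() are hypothetical; the scatterlist helpers
 * (sg_init_table, sg_set_buf, sg_dma_address, sg_dma_len) are the
 * standard kernel API:
 *
 *	struct scatterlist sgl[4];
 *	int i, count;
 *
 *	sg_init_table(sgl, 4);
 *	for (i = 0; i < 4; i++)
 *		sg_set_buf(&sgl[i], buffers[i], PAGE_SIZE);
 *
 *	count = dma_map_sg(dev, sgl, 4, DMA_TO_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	for (i = 0; i < count; i++)
 *		program_hw_descriptor(i, sg_dma_address(&sgl[i]),
 *				      sg_dma_len(&sgl[i]));
 */

/* Free memory obtained from dma_generic_alloc_coherent(). */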
static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
				dma_addr_t dma_addr)
{
	free_pages((unsigned long)vaddr, get_order(size));
}

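/*
 * There is no IOMMU state to maintain; syncing for the device only needs
 * to drain the CPU write buffers so the device sees up-to-date memory.
 */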
static void nommu_sync_single_for_device(struct device *dev,
			dma_addr_t addr, size_t size,
			enum dma_data_direction dir)
{
	flush_write_buffers();
}

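/* Scatterlist variant of the above: drain the CPU write buffers only. */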
static void nommu_sync_sg_for_device(struct device *dev,
			struct scatterlist *sg, int nelems,
			enum dma_data_direction dir)
{
	flush_write_buffers();
}

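/*
 * DMA operations used when no hardware IOMMU is available: bus addresses
 * are plain physical addresses (is_phys = 1) and coherent memory comes
 * straight from the page allocator via dma_generic_alloc_coherent().
 */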
struct dma_map_ops nommu_dma_ops = {
	.alloc_coherent = dma_generic_alloc_coherent,
	.free_coherent = nommu_free_coherent,
	.map_sg = nommu_map_sg,
	.map_page = nommu_map_page,
	.sync_single_for_device = nommu_sync_single_for_device,
	.sync_sg_for_device = nommu_sync_sg_for_device,
	.is_phys = 1,
};