#include <linux/cpu.h>
#include <linux/dma-mapping.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <xen/xen.h>
#include <xen/interface/memory.h>
#include <xen/swiotlb-xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

enum dma_cache_op {
	DMA_UNMAP,
	DMA_MAP,
};

/* functions called by SWIOTLB */

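/*
 * Walk the buffer one page-sized chunk at a time and apply the requested
 * cache maintenance: DMA_MAP before handing the buffer to the device,
 * DMA_UNMAP when taking it back.  The actual flush/invalidate is still a
 * TODO in the loop body.
 */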
static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
	size_t size, enum dma_data_direction dir, enum dma_cache_op op)
{
	unsigned long pfn;
	size_t left = size;

	pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	do {
		size_t len = left;

		/* handle one page at a time; chunks must not cross a page boundary */
		if (len + offset > PAGE_SIZE)
			len = PAGE_SIZE - offset;

		/*
		 * TODO: cache flush.  The per-chunk flush/invalidate for
		 * (pfn, offset, len) would go here; presumably arch cache
		 * maintenance for local pages, with foreign pages needing
		 * separate handling.  Left unimplemented in this file.
		 */

		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}

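/*
 * Thin wrappers that map the SWIOTLB "device to CPU" and "CPU to device"
 * transitions onto the two dma_cache_maint() operations.
 */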
static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
}

static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
}

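/*
 * map/unmap entry points used by the Xen SWIOTLB code.  Cache maintenance
 * is skipped entirely for coherent devices and when the caller passes
 * DMA_ATTR_SKIP_CPU_SYNC.
 */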
void __xen_dma_map_page(struct device *hwdev, struct page *page,
	     dma_addr_t dev_addr, unsigned long offset, size_t size,
	     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	if (is_device_dma_coherent(hwdev))
		return;
	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		return;

	__xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
}

void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	if (is_device_dma_coherent(hwdev))
		return;
	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		return;

	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

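/*
 * sync entry points: same cache maintenance as map/unmap, but without the
 * DMA_ATTR_SKIP_CPU_SYNC check.
 */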
void __xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (is_device_dma_coherent(hwdev))
		return;
	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void __xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (is_device_dma_coherent(hwdev))
		return;
	__xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
}

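/*
 * A page only needs to be bounced through swiotlb-xen when it is foreign
 * (pfn != mfn) and the device is not cache-coherent; local pages and
 * coherent devices can be handled directly.
 */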
bool xen_arch_need_swiotlb(struct device *dev,
			   unsigned long pfn,
			   unsigned long mfn)
{
	return ((pfn != mfn) && !is_device_dma_coherent(dev));
}

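/*
 * Dom0 is assumed to be mapped 1:1 (pfn == mfn), so a physically
 * contiguous region is already machine-contiguous: the DMA handle is
 * simply the physical start address, and teardown is a no-op.
 */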
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
				 unsigned int address_bits,
				 dma_addr_t *dma_handle)
{
	if (!xen_initial_domain())
		return -EINVAL;

	/* we assume that dom0 is mapped 1:1 for now */
	*dma_handle = pstart;
	return 0;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);

void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
{
	return;
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);

struct dma_map_ops *xen_dma_ops;
EXPORT_SYMBOL_GPL(xen_dma_ops);

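/*
 * DMA operations installed for the initial domain: every callback is
 * routed through the swiotlb-xen backend so that pages the device cannot
 * reach directly can be bounced through a local buffer.
 */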
static struct dma_map_ops xen_swiotlb_dma_ops = {
	.mapping_error = xen_swiotlb_dma_mapping_error,
	.alloc = xen_swiotlb_alloc_coherent,
	.free = xen_swiotlb_free_coherent,
	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
	.map_sg = xen_swiotlb_map_sg_attrs,
	.unmap_sg = xen_swiotlb_unmap_sg_attrs,
	.map_page = xen_swiotlb_map_page,
	.unmap_page = xen_swiotlb_unmap_page,
	.dma_supported = xen_swiotlb_dma_supported,
	.set_dma_mask = xen_swiotlb_set_dma_mask,
};

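/*
 * Initialise the Xen swiotlb and publish the Xen DMA ops.  Only the
 * initial domain needs this; other domains return early.
 */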
int __init xen_mm_init(void)
{
	if (!xen_initial_domain())
		return 0;
	xen_swiotlb_init(1, false);
	xen_dma_ops = &xen_swiotlb_dma_ops;
	return 0;
}
arch_initcall(xen_mm_init);