#include <linux/cpu.h>
#include <linux/dma-mapping.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <xen/xen.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/swiotlb-xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

enum dma_cache_op {
        DMA_UNMAP,
        DMA_MAP,
};
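/*
 * Set at init time if the hypervisor turns out to implement the
 * GNTTABOP_cache_flush hypercall (probed in xen_mm_init() below).
 */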
static bool hypercall_cflush = false;

/* functions called by SWIOTLB */

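/*
 * Perform cache maintenance on [handle + offset, handle + offset + size)
 * one page at a time via the GNTTABOP_cache_flush hypercall, cleaning or
 * invalidating as dictated by the transfer direction and by whether the
 * buffer is being mapped or unmapped. The hypercall is used because the
 * page may be foreign (owned by another domain), in which case dom0
 * cannot simply perform the maintenance through a local mapping.
 */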
static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
        size_t size, enum dma_data_direction dir, enum dma_cache_op op)
{
        struct gnttab_cache_flush cflush;
        unsigned long pfn;
        size_t left = size;

        pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
        offset %= PAGE_SIZE;

        do {
                size_t len = left;

                /* buffers in highmem or foreign pages cannot cross page
                 * boundaries */
                if (len + offset > PAGE_SIZE)
                        len = PAGE_SIZE - offset;

                cflush.op = 0;
                cflush.a.dev_bus_addr = pfn << PAGE_SHIFT;
                cflush.offset = offset;
                cflush.length = len;

                if (op == DMA_UNMAP && dir != DMA_TO_DEVICE)
                        cflush.op = GNTTAB_CACHE_INVAL;
                if (op == DMA_MAP) {
                        if (dir == DMA_FROM_DEVICE)
                                cflush.op = GNTTAB_CACHE_INVAL;
                        else
                                cflush.op = GNTTAB_CACHE_CLEAN;
                }
                if (cflush.op)
                        HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);

                offset = 0;
                pfn++;
                left -= len;
        } while (left);
}

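/*
 * Split the bus address into its page-aligned base and in-page offset
 * before delegating to dma_cache_maint().
 */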
static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
}

static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
}

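/*
 * Make the buffer visible to the device before DMA. Nothing to do for
 * cache-coherent devices, or when the caller asked for CPU syncs to be
 * skipped with DMA_ATTR_SKIP_CPU_SYNC.
 */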
void __xen_dma_map_page(struct device *hwdev, struct page *page,
             dma_addr_t dev_addr, unsigned long offset, size_t size,
             enum dma_data_direction dir, struct dma_attrs *attrs)
{
        if (is_device_dma_coherent(hwdev))
                return;
        if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                return;

        __xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
}

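/*
 * Hand the buffer back to the CPU after DMA, subject to the same
 * coherency and DMA_ATTR_SKIP_CPU_SYNC exemptions as the map path.
 */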
void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir,
                struct dma_attrs *attrs)
{
        if (is_device_dma_coherent(hwdev))
                return;
        if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                return;

        __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

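/*
 * dma_sync_single_for_{cpu,device} backends: same page-by-page cache
 * maintenance as map/unmap, but only coherent devices are exempt.
 */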
void __xen_dma_sync_single_for_cpu(struct device *hwdev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        if (is_device_dma_coherent(hwdev))
                return;
        __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void __xen_dma_sync_single_for_device(struct device *hwdev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        if (is_device_dma_coherent(hwdev))
                return;
        __xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
}

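/*
 * Bounce buffers are only needed for pages that are foreign (pfn != mfn)
 * and used by a non-coherent device, and then only when the cache-flush
 * hypercall is unavailable: with the hypercall we can maintain the cache
 * of the foreign page directly instead.
 */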
bool xen_arch_need_swiotlb(struct device *dev,
                           unsigned long pfn,
                           unsigned long mfn)
{
        return (!hypercall_cflush && (pfn != mfn) && !is_device_dma_coherent(dev));
}

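/*
 * Nothing needs to be remapped here: dom0 is assumed to be mapped 1:1
 * (pfn == mfn), so a physically contiguous region is already contiguous
 * in bus addresses.
 */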
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
                                 unsigned int address_bits,
                                 dma_addr_t *dma_handle)
{
        if (!xen_initial_domain())
                return -EINVAL;

        /* we assume that dom0 is mapped 1:1 for now */
        *dma_handle = pstart;
        return 0;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);

void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
{
        return;
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);

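/*
 * DMA operations in effect for this domain; the arch dma-mapping code is
 * expected to dispatch to these when running on Xen.
 */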
struct dma_map_ops *xen_dma_ops;
EXPORT_SYMBOL(xen_dma_ops);

static struct dma_map_ops xen_swiotlb_dma_ops = {
        .mapping_error = xen_swiotlb_dma_mapping_error,
        .alloc = xen_swiotlb_alloc_coherent,
        .free = xen_swiotlb_free_coherent,
        .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
        .sync_single_for_device = xen_swiotlb_sync_single_for_device,
        .sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
        .sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
        .map_sg = xen_swiotlb_map_sg_attrs,
        .unmap_sg = xen_swiotlb_unmap_sg_attrs,
        .map_page = xen_swiotlb_map_page,
        .unmap_page = xen_swiotlb_unmap_page,
        .dma_supported = xen_swiotlb_dma_supported,
        .set_dma_mask = xen_swiotlb_set_dma_mask,
};

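/*
 * Set up the swiotlb-xen DMA ops for the initial domain and probe
 * whether the hypervisor implements GNTTABOP_cache_flush.
 */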
int __init xen_mm_init(void)
{
        struct gnttab_cache_flush cflush;
        if (!xen_initial_domain())
                return 0;
        xen_swiotlb_init(1, false);
        xen_dma_ops = &xen_swiotlb_dma_ops;

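        /*
         * Issue a harmless zero-length flush: any return value other
         * than -ENOSYS means the hypervisor knows the hypercall.
         */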
        cflush.op = 0;
        cflush.a.dev_bus_addr = 0;
        cflush.offset = 0;
        cflush.length = 0;
        if (HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1) != -ENOSYS)
                hypercall_cflush = true;
        return 0;
}
arch_initcall(xen_mm_init);