#include <linux/cpu.h>
#include <linux/dma-mapping.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <xen/xen.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/swiotlb-xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

enum dma_cache_op {
	DMA_UNMAP,
	DMA_MAP,
};

/* functions called by SWIOTLB */

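/*
 * Walk the DMA buffer one page at a time and perform the cache maintenance
 * needed before the device reads the buffer (DMA_MAP) or before the CPU
 * reads it back (DMA_UNMAP).  The per-page flush below is a sketch that
 * assumes the hypervisor implements the GNTTABOP_cache_flush grant-table
 * operation; a hypervisor without it would need a different mechanism.
 */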
static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
	size_t size, enum dma_data_direction dir, enum dma_cache_op op)
{
	struct gnttab_cache_flush cflush;
	unsigned long pfn;
	size_t left = size;

	pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	do {
		size_t len = left;

		/* restrict cache maintenance to one page at a time */
		if (len + offset > PAGE_SIZE)
			len = PAGE_SIZE - offset;

		/* assumption: the GNTTABOP_cache_flush hypercall is available */
		cflush.op = 0;
		cflush.a.dev_bus_addr = (uint64_t)pfn << PAGE_SHIFT;
		cflush.offset = offset;
		cflush.length = len;
		if (op == DMA_UNMAP && dir != DMA_TO_DEVICE)
			cflush.op = GNTTAB_CACHE_INVAL;
		if (op == DMA_MAP) {
			if (dir == DMA_FROM_DEVICE)
				cflush.op = GNTTAB_CACHE_INVAL;
			else
				cflush.op = GNTTAB_CACHE_CLEAN;
		}
		if (cflush.op)
			HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);

		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}

static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
}

static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
}

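/*
 * Hooks called by the swiotlb-xen code when a page is mapped for DMA and
 * when it is unmapped again.  Cache maintenance is skipped for coherent
 * devices and when the caller requested it with DMA_ATTR_SKIP_CPU_SYNC.
 */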
void __xen_dma_map_page(struct device *hwdev, struct page *page,
		dma_addr_t dev_addr, unsigned long offset, size_t size,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	if (is_device_dma_coherent(hwdev))
		return;
	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		return;

	__xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
}

void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	if (is_device_dma_coherent(hwdev))
		return;
	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		return;

	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void __xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (is_device_dma_coherent(hwdev))
		return;
	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void __xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (is_device_dma_coherent(hwdev))
		return;
	__xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
}

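/*
 * No memory exchange with the hypervisor is done here: dom0 on ARM is
 * assumed to be mapped 1:1 (guest physical == machine addresses), so a
 * physically contiguous buffer is already bus-contiguous and the DMA
 * handle is simply the physical start address.
 */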
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
				 unsigned int address_bits,
				 dma_addr_t *dma_handle)
{
	if (!xen_initial_domain())
		return -EINVAL;

	/* we assume that dom0 is mapped 1:1 for now */
	*dma_handle = pstart;
	return 0;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);

void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
{
	return;
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);

struct dma_map_ops *xen_dma_ops;
EXPORT_SYMBOL_GPL(xen_dma_ops);

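/*
 * DMA operations backed by the swiotlb-xen bounce-buffer implementation;
 * installed as xen_dma_ops by xen_mm_init() when running as the initial
 * domain.
 */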
static struct dma_map_ops xen_swiotlb_dma_ops = {
	.mapping_error = xen_swiotlb_dma_mapping_error,
	.alloc = xen_swiotlb_alloc_coherent,
	.free = xen_swiotlb_free_coherent,
	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
	.map_sg = xen_swiotlb_map_sg_attrs,
	.unmap_sg = xen_swiotlb_unmap_sg_attrs,
	.map_page = xen_swiotlb_map_page,
	.unmap_page = xen_swiotlb_unmap_page,
	.dma_supported = xen_swiotlb_dma_supported,
	.set_dma_mask = xen_swiotlb_set_dma_mask,
};

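/*
 * Runs at arch_initcall time.  Only the initial domain sets up the Xen
 * swiotlb here: it initializes the bounce buffers and publishes
 * xen_dma_ops for the rest of the architecture code to pick up.
 */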
int __init xen_mm_init(void)
{
	if (!xen_initial_domain())
		return 0;
	xen_swiotlb_init(1, false);
	xen_dma_ops = &xen_swiotlb_dma_ops;
	return 0;
}
arch_initcall(xen_mm_init);