#include <linux/cpu.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/highmem.h>

#include <xen/features.h>
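
/* Type of cache maintenance requested by the swiotlb callbacks below. */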
enum dma_cache_op {
	DMA_UNMAP,
	DMA_MAP,
};

/* functions called by SWIOTLB */
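
/*
 * Walk the DMA buffer one page at a time so that each pass only has to
 * deal with a single page; the actual per-page cache flush is still a
 * TODO in this version.
 */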
static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
	size_t size, enum dma_data_direction dir, enum dma_cache_op op)
{
	unsigned long pfn;
	size_t left = size;

	pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	do {
		size_t len = left;

		/* clamp so that each pass stays within a single page */
		if (len + offset > PAGE_SIZE)
			len = PAGE_SIZE - offset;

		/* TODO: cache flush */

		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}
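
/*
 * The two helpers below split the DMA handle into its page-aligned base
 * and the offset within the page, then request the matching cache
 * maintenance operation.
 */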
static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
}

static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
}
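
/*
 * Called by the Xen swiotlb code after a page has been mapped for DMA.
 * No cache maintenance is needed for coherent devices or when the
 * caller asked to skip CPU synchronisation.
 */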
void __xen_dma_map_page(struct device *hwdev, struct page *page,
		dma_addr_t dev_addr, unsigned long offset, size_t size,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	if (is_device_dma_coherent(hwdev))
		return;
	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		return;

	__xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
}
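
/*
 * Counterpart of __xen_dma_map_page: bring the CPU view of the buffer
 * back in sync once the device is done with it.
 */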
void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	if (is_device_dma_coherent(hwdev))
		return;
	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		return;

	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}
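
/*
 * Make the buffer visible to the CPU before it reads data written by
 * the device.
 */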
void __xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (is_device_dma_coherent(hwdev))
		return;
	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}
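
/*
 * Hand the buffer back to the device after the CPU has written to it.
 */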
void __xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (is_device_dma_coherent(hwdev))
		return;
	__xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
}
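
/*
 * Early init hook: only relevant in the initial domain and currently
 * has nothing to set up.
 */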
int __init xen_mm32_init(void)
{
	if (!xen_initial_domain())
		return 0;

	return 0;
}
arch_initcall(xen_mm32_init);