#include <linux/cpu.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/highmem.h>

#include <xen/features.h>

enum dma_cache_op {
	DMA_UNMAP,
	DMA_MAP,
};

/* functions called by SWIOTLB */

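/*
 * Walk the DMA buffer one page frame at a time so that cache maintenance
 * can be applied per page.  In this version the actual flush/invalidate
 * step is still a TODO, so the loop only advances the bookkeeping.
 */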
static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
	size_t size, enum dma_data_direction dir, enum dma_cache_op op)
{
	unsigned long pfn;
	size_t left = size;

	pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	do {
		size_t len = left;

		/* TODO: cache flush */

		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}

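/*
 * Helper for the unmap/sync-for-cpu paths: split the bus address into a
 * page-aligned base and an in-page offset, then request DMA_UNMAP cache
 * maintenance over the buffer.
 */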
static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
}

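/*
 * Helper for the map/sync-for-device paths: same address split as above,
 * but requesting DMA_MAP cache maintenance.
 */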
static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
}

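/*
 * Unmap hook called by the Xen swiotlb code: cache maintenance is skipped
 * for coherent devices and when DMA_ATTR_SKIP_CPU_SYNC is set; otherwise
 * the buffer is handed back to the CPU.
 */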
void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	if (is_device_dma_coherent(hwdev))
		return;
	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		return;

	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

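/* Sync a single mapping for CPU access; a no-op for coherent devices. */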
void __xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (is_device_dma_coherent(hwdev))
		return;
	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

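/* Sync a single mapping for device access; a no-op for coherent devices. */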
void __xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (is_device_dma_coherent(hwdev))
		return;
	__xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
}

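/*
 * Nothing to set up yet: this initcall only distinguishes the initial
 * domain (dom0) from other guests, and currently returns 0 in both cases.
 */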
int __init xen_mm32_init(void)
{
	if (!xen_initial_domain())
		return 0;

	return 0;
}
arch_initcall(xen_mm32_init);