#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
#define _ASM_ARM_XEN_PAGE_COHERENT_H

#include <asm/page.h>
#include <linux/dma-mapping.h>

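/*
 * Xen-aware DMA helpers for ARM.  Dom0 runs with a 1:1 pseudo-physical
 * mapping, so a page is either local (its first xen_pfn == mfn) or a
 * foreign page grant-mapped from another domain.  The inline wrappers
 * below pick the native dma_ops for local pages and fall back to the
 * out-of-line __xen_dma_* helpers for foreign ones.
 */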
void __xen_dma_map_page(struct device *hwdev, struct page *page,
	     dma_addr_t dev_addr, unsigned long offset, size_t size,
	     enum dma_data_direction dir, unsigned long attrs);
void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		unsigned long attrs);
void __xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir);

void __xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir);

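/*
 * Coherent allocations are always handed straight to the native dma_ops;
 * only the streaming map/unmap/sync paths further down need to
 * distinguish local pages from foreign grant-mapped ones.
 */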
static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
{
	return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
}

static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
	__generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
}

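/*
 * The "local" test below checks whether the Xen pfn of dev_addr falls
 * inside the (possibly compound) kernel page.  Illustrative example,
 * assuming 4KiB Xen pages and a 64KiB kernel page size (so
 * XEN_PFN_PER_PAGE == 16): an order-0 page starting at xen_pfn 0x1000
 * gives compound_pages == 16, so any dev_addr whose xen_pfn lies in
 * [0x1000, 0x1010) is treated as local.
 */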
static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
	     dma_addr_t dev_addr, unsigned long offset, size_t size,
	     enum dma_data_direction dir, unsigned long attrs)
{
	unsigned long page_pfn = page_to_xen_pfn(page);
	unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
	unsigned long compound_pages =
		(1<<compound_order(page)) * XEN_PFN_PER_PAGE;
	bool local = (page_pfn <= dev_pfn) &&
		(dev_pfn - page_pfn < compound_pages);

	/*
	 * Dom0 is mapped 1:1.  A Linux page can span multiple Xen pages,
	 * but it cannot contain a mix of local and foreign Xen pages, so
	 * if the first xen_pfn == mfn the page is local; otherwise it is
	 * a foreign page grant-mapped into dom0.  If the page is local we
	 * can safely call the native dma_ops function, otherwise we call
	 * the Xen-specific one.
	 */
	if (local)
		__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
	else
		__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
}

static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	unsigned long pfn = PFN_DOWN(handle);
	/*
	 * Dom0 is mapped 1:1.  A Linux page can span multiple Xen pages,
	 * but it cannot contain a mix of local and foreign Xen pages.
	 * Because of the 1:1 mapping, calling pfn_valid() on a foreign
	 * mfn always returns false.  If the page is local we can safely
	 * call the native dma_ops function, otherwise we call the
	 * Xen-specific one.
	 */
	if (pfn_valid(pfn)) {
		if (__generic_dma_ops(hwdev)->unmap_page)
			__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
	} else
		__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
}

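/*
 * The sync helpers below use the same pfn_valid() test as unmap: at this
 * point only the dma_addr_t handle is available, and on a 1:1 mapped dom0
 * a valid pfn identifies a local page while a foreign mfn does not.
 */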
static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(handle);
	if (pfn_valid(pfn)) {
		if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
			__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
	} else
		__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
}

static inline void xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(handle);
	if (pfn_valid(pfn)) {
		if (__generic_dma_ops(hwdev)->sync_single_for_device)
			__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
	} else
		__xen_dma_sync_single_for_device(hwdev, handle, size, dir);
}

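/*
 * Illustrative usage sketch (not part of this header's API contract):
 * a caller maps a page for DMA, performs the transfer, then unmaps it
 * with the same device address it passed in:
 *
 *	xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
 *	...DMA...
 *	xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);
 */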
#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */