#ifndef _ASM_ARM_XEN_PAGE_H
#define _ASM_ARM_XEN_PAGE_H

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/bug.h>
#include <linux/pfn.h>
#include <linux/rbtree.h>
#include <linux/types.h>

#include <xen/xen.h>
#include <xen/interface/grant_table.h>

#define phys_to_machine_mapping_valid(pfn) (1)
#define mfn_to_virt(m)			(__va(mfn_to_pfn(m) << PAGE_SHIFT))

#define pte_mfn		pte_pfn
#define mfn_pte		pfn_pte

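/*
 * Illustrative sketch, not part of this header: mfn_pte() builds a page
 * table entry straight from a machine frame number.  Because ARM guests run
 * autotranslated, the mfn-based helpers simply alias the pfn-based ones from
 * <asm/pgtable.h>.  The example_* name below is hypothetical.
 */
static inline pte_t example_mfn_to_kernel_pte(unsigned long mfn)
{
	/* PAGE_KERNEL and pfn_pte() come from <asm/pgtable.h>, included above. */
	return mfn_pte(mfn, PAGE_KERNEL);
}
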
/* Xen machine address */
typedef struct xmaddr {
	phys_addr_t maddr;
} xmaddr_t;

/* Xen pseudo-physical address */
typedef struct xpaddr {
	phys_addr_t paddr;
} xpaddr_t;

#define XMADDR(x)	((xmaddr_t) { .maddr = (x) })
#define XPADDR(x)	((xpaddr_t) { .paddr = (x) })

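/*
 * Illustrative sketch, not part of this header: wrapping machine and
 * pseudo-physical addresses in distinct struct types means mixing the two
 * address spaces fails to compile instead of silently producing a bogus
 * address.  The example_* helper below is hypothetical.
 */
static inline bool example_xaddr_same_offset(xmaddr_t maddr, xpaddr_t paddr)
{
	/* Each wrapper is unpacked explicitly through its own field. */
	return (maddr.maddr & ~PAGE_MASK) == (paddr.paddr & ~PAGE_MASK);
}
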
#define INVALID_P2M_ENTRY	(~0UL)

unsigned long __pfn_to_mfn(unsigned long pfn);
unsigned long __mfn_to_pfn(unsigned long mfn);
extern struct rb_root phys_to_mach;

static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
	unsigned long mfn;

	if (phys_to_mach.rb_node != NULL) {
		mfn = __pfn_to_mfn(pfn);
		if (mfn != INVALID_P2M_ENTRY)
			return mfn;
	}

	if (xen_initial_domain())
		return pfn;
	else
		return INVALID_P2M_ENTRY;
}

static inline unsigned long mfn_to_pfn(unsigned long mfn)
{
	unsigned long pfn;

	if (phys_to_mach.rb_node != NULL) {
		pfn = __mfn_to_pfn(mfn);
		if (pfn != INVALID_P2M_ENTRY)
			return pfn;
	}

	if (xen_initial_domain())
		return mfn;
	else
		return INVALID_P2M_ENTRY;
}

#define mfn_to_local_pfn(mfn) mfn_to_pfn(mfn)
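
/*
 * Usage sketch (illustrative only, not part of the Xen interface): callers
 * must treat INVALID_P2M_ENTRY as "no translation known".  Entries found in
 * the phys_to_mach rbtree take precedence; otherwise the initial domain,
 * whose memory is mapped 1:1, falls back to the identity translation.  The
 * example_* name below is hypothetical.
 */
static inline bool example_pfn_has_machine_frame(unsigned long pfn,
						 unsigned long *mfn)
{
	unsigned long m = pfn_to_mfn(pfn);

	if (m == INVALID_P2M_ENTRY)
		return false;	/* not in the rbtree and not the initial domain */

	*mfn = m;
	return true;
}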

static inline xmaddr_t phys_to_machine(xpaddr_t phys)
{
	unsigned offset = phys.paddr & ~PAGE_MASK;
	return XMADDR(PFN_PHYS(pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset);
}

static inline xpaddr_t machine_to_phys(xmaddr_t machine)
{
	unsigned offset = machine.maddr & ~PAGE_MASK;
	return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
}
/* VIRT <-> MACHINE conversion */
#define virt_to_machine(v)	(phys_to_machine(XPADDR(__pa(v))))
#define virt_to_pfn(v)		(PFN_DOWN(__pa(v)))
#define virt_to_mfn(v)		(pfn_to_mfn(virt_to_pfn(v)))
#define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))
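
/*
 * Usage sketch (illustrative only): splitting the machine address of a
 * lowmem kernel buffer into a machine frame number plus offset, e.g. before
 * describing the buffer to the hypervisor.  Like the macros above, this
 * assumes the buffer lives in the kernel linear mapping.  The example_*
 * name is hypothetical.
 */
static inline void example_virt_to_machine_frame(void *vaddr,
						 unsigned long *mfn,
						 unsigned long *offset)
{
	xmaddr_t maddr = virt_to_machine(vaddr);

	*mfn = PFN_DOWN(maddr.maddr);		/* machine frame number */
	*offset = maddr.maddr & ~PAGE_MASK;	/* offset within that frame */
}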

static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	/* TODO: assuming it is mapped in the kernel 1:1 */
	return virt_to_machine(vaddr);
}

/* TODO: this shouldn't be here, but it is because the frontend drivers
 * are using it (it's rolled into their headers) even though we will never
 * hit this code path.  So for right now just punt with this.
 */
static inline pte_t *lookup_address(unsigned long address, unsigned int *level)
{
	BUG();
	return NULL;
}

static inline int m2p_add_override(unsigned long mfn, struct page *page,
		struct gnttab_map_grant_ref *kmap_op)
{
	return 0;
}

static inline int m2p_remove_override(struct page *page, bool clear_pte)
{
	return 0;
}

bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
		unsigned long nr_pages);

static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	return __set_phys_to_machine(pfn, mfn);
}
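
/*
 * Usage sketch (illustrative only): code that maps foreign machine frames at
 * local pfns is expected to record the translation so that later
 * pfn_to_mfn() lookups hit the phys_to_mach rbtree, and to clear it again
 * with the INVALID_P2M_ENTRY sentinel once the mapping is torn down.  The
 * example_* names below are hypothetical.
 */
static inline bool example_record_foreign_range(unsigned long pfn,
						unsigned long mfn,
						unsigned long nr_pages)
{
	/* Records pfn+i -> mfn+i for the whole contiguous range. */
	return __set_phys_to_machine_multi(pfn, mfn, nr_pages);
}

static inline bool example_forget_foreign_frame(unsigned long pfn)
{
	return set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}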

#define xen_remap(cookie, size) ioremap_cached((cookie), (size))
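
/*
 * Usage sketch (illustrative only): regions shared with the hypervisor,
 * such as the shared info page, are mapped cacheable on ARM through
 * xen_remap().  The fragment stays under "#if 0" because ioremap_cached()
 * comes from <linux/io.h>, which this header does not include; the
 * example_* name and the frame number argument are hypothetical.
 */
#if 0	/* example only */
static void __iomem *example_map_shared_page(xen_pfn_t gfn)
{
	return xen_remap((phys_addr_t)gfn << PAGE_SHIFT, PAGE_SIZE);
}
#endif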

#endif /* _ASM_ARM_XEN_PAGE_H */