/* include/xen/xen-ops.h — Xen guest operations (suspend/resume, foreign frame mapping, EFI, preemptible hypercalls) */
Isaku Yamahatae04d0d02008-04-02 10:53:55 -07001#ifndef INCLUDE_XEN_OPS_H
2#define INCLUDE_XEN_OPS_H
3
4#include <linux/percpu.h>
Stanislaw Gruszkacd979882014-02-26 11:30:30 +01005#include <linux/notifier.h>
Daniel Kiperbe81c8a2014-06-30 19:53:02 +02006#include <linux/efi.h>
Ian Campbell7892f692012-10-16 17:19:15 +01007#include <asm/xen/interface.h>
Isaku Yamahatae04d0d02008-04-02 10:53:55 -07008
9DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);
10
Ian Campbell03c81422011-02-17 11:04:20 +000011void xen_arch_pre_suspend(void);
12void xen_arch_post_suspend(int suspend_cancelled);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +010013
Isaku Yamahataad55db92008-07-08 15:06:32 -070014void xen_timer_resume(void);
15void xen_arch_resume(void);
Boris Ostrovsky2b953a52015-04-28 18:46:20 -040016void xen_arch_suspend(void);
Isaku Yamahataad55db92008-07-08 15:06:32 -070017
Stanislaw Gruszkacd979882014-02-26 11:30:30 +010018void xen_resume_notifier_register(struct notifier_block *nb);
19void xen_resume_notifier_unregister(struct notifier_block *nb);
20
Stefano Stabellini016b6f52010-05-14 12:45:07 +010021int xen_setup_shutdown_event(void);
22
Alex Nixon08bbc9d2009-02-09 12:05:46 -080023extern unsigned long *xen_contiguous_bitmap;
Stefano Stabellini1b65c4e2013-10-10 13:41:10 +000024int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
Stefano Stabellini69908902013-10-09 16:56:32 +000025 unsigned int address_bits,
26 dma_addr_t *dma_handle);
Alex Nixon08bbc9d2009-02-09 12:05:46 -080027
Stefano Stabellini1b65c4e2013-10-10 13:41:10 +000028void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
Alex Nixon08bbc9d2009-02-09 12:05:46 -080029
David Howellsc140d872012-03-28 18:30:02 +010030struct vm_area_struct;
David Vrabel4e8c0c82015-03-11 14:49:57 +000031
32/*
Julien Gralla13d7202015-08-07 17:34:41 +010033 * xen_remap_domain_gfn_array() - map an array of foreign frames
David Vrabel4e8c0c82015-03-11 14:49:57 +000034 * @vma: VMA to map the pages into
35 * @addr: Address at which to map the pages
36 * @gfn: Array of GFNs to map
37 * @nr: Number entries in the GFN array
38 * @err_ptr: Returns per-GFN error status.
39 * @prot: page protection mask
40 * @domid: Domain owning the pages
41 * @pages: Array of pages if this domain has an auto-translated physmap
42 *
43 * @gfn and @err_ptr may point to the same buffer, the GFNs will be
44 * overwritten by the error codes after they are mapped.
45 *
46 * Returns the number of successfully mapped frames, or a -ve error
47 * code.
48 */
Julien Gralla13d7202015-08-07 17:34:41 +010049int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
David Vrabel4e8c0c82015-03-11 14:49:57 +000050 unsigned long addr,
51 xen_pfn_t *gfn, int nr,
52 int *err_ptr, pgprot_t prot,
53 unsigned domid,
54 struct page **pages);
55
Julien Gralla13d7202015-08-07 17:34:41 +010056/* xen_remap_domain_gfn_range() - map a range of foreign frames
David Vrabel4e8c0c82015-03-11 14:49:57 +000057 * @vma: VMA to map the pages into
58 * @addr: Address at which to map the pages
59 * @gfn: First GFN to map.
60 * @nr: Number frames to map
61 * @prot: page protection mask
62 * @domid: Domain owning the pages
63 * @pages: Array of pages if this domain has an auto-translated physmap
64 *
65 * Returns the number of successfully mapped frames, or a -ve error
66 * code.
67 */
Julien Gralla13d7202015-08-07 17:34:41 +010068int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
Ian Campbellde1ef202009-05-21 10:09:46 +010069 unsigned long addr,
David Vrabel4e8c0c82015-03-11 14:49:57 +000070 xen_pfn_t gfn, int nr,
Ian Campbell9a032e32012-10-17 13:37:49 -070071 pgprot_t prot, unsigned domid,
72 struct page **pages);
Julien Gralla13d7202015-08-07 17:34:41 +010073int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
Ian Campbell9a032e32012-10-17 13:37:49 -070074 int numpgs, struct page **pages);
David Vrabel4e8c0c82015-03-11 14:49:57 +000075int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
David Vrabel628c28e2015-03-11 14:49:56 +000076 unsigned long addr,
David Vrabel4e8c0c82015-03-11 14:49:57 +000077 xen_pfn_t *gfn, int nr,
78 int *err_ptr, pgprot_t prot,
David Vrabel628c28e2015-03-11 14:49:56 +000079 unsigned domid,
80 struct page **pages);
81int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
82 int nr, struct page **pages);
Ian Campbellde1ef202009-05-21 10:09:46 +010083
Konrad Rzeszutek Wilk394b40f2012-11-27 11:39:40 -050084bool xen_running_on_version_or_later(unsigned int major, unsigned int minor);
Daniel Kiperbe81c8a2014-06-30 19:53:02 +020085
86#ifdef CONFIG_XEN_EFI
87extern efi_system_table_t *xen_efi_probe(void);
88#else
Daniel Kiper57f71382014-07-12 23:09:47 +020089static inline efi_system_table_t __init *xen_efi_probe(void)
Daniel Kiperbe81c8a2014-06-30 19:53:02 +020090{
91 return NULL;
92}
93#endif
94
David Vrabelfdfd8112015-02-19 15:23:17 +000095#ifdef CONFIG_PREEMPT
96
97static inline void xen_preemptible_hcall_begin(void)
98{
99}
100
101static inline void xen_preemptible_hcall_end(void)
102{
103}
104
105#else
106
107DECLARE_PER_CPU(bool, xen_in_preemptible_hcall);
108
109static inline void xen_preemptible_hcall_begin(void)
110{
111 __this_cpu_write(xen_in_preemptible_hcall, true);
112}
113
114static inline void xen_preemptible_hcall_end(void)
115{
116 __this_cpu_write(xen_in_preemptible_hcall, false);
117}
118
119#endif /* CONFIG_PREEMPT */
120
Isaku Yamahatae04d0d02008-04-02 10:53:55 -0700121#endif /* INCLUDE_XEN_OPS_H */