#include <linux/pfn.h>
#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <xen/interface/memory.h>

#include "multicalls.h"
#include "mmu.h"

/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and balloon lists.
 */
DEFINE_SPINLOCK(xen_reservation_lock);

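/*
 * Translate an arbitrary kernel virtual address to the machine frame
 * number backing it.  A thin wrapper around arbitrary_virt_to_machine()
 * that drops the sub-page offset.
 */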
unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

	return PFN_DOWN(maddr.maddr);
}

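/*
 * Translate a kernel virtual address to a machine address.  Unlike
 * virt_to_machine(), this also works for vmalloc and other mappings
 * outside the linear map, at the cost of a full page-table walk.
 */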
xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned int offset;

	/*
	 * If the address is in the linear-mapped range, we can just use
	 * the (quick) virt_to_machine() p2m lookup.
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* Otherwise we have to do a (slower) full page-table walk. */
	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);

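/*
 * Flush stale TLB entries after the page tables have been updated.  A
 * single MMUEXT_TLB_FLUSH_ALL operation is queued and issued through
 * the multicall interface; preemption is disabled so the per-CPU
 * multicall buffer is filled and flushed on the same CPU.
 */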
static noinline void xen_flush_tlb_all(void)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_TLB_FLUSH_ALL;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

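/*
 * Foreign frames are remapped in batches of up to REMAP_BATCH_SIZE
 * ptes; struct remap_data carries the state shared with the per-pte
 * callback below.
 */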
#define REMAP_BATCH_SIZE 16

struct remap_data {
	xen_pfn_t *mfn;
	bool contiguous;
	pgprot_t prot;
	struct mmu_update *mmu_update;
};

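/*
 * apply_to_page_range() callback: construct the pte for one page and
 * queue the matching mmu_update entry for the later hypercall.
 */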
static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
				 unsigned long addr, void *data)
{
	struct remap_data *rmd = data;
	pte_t pte = pte_mkspecial(mfn_pte(*rmd->mfn, rmd->prot));

	/*
	 * If we have a contiguous range, just update the mfn itself,
	 * else update the pointer to be "next mfn".
	 */
	if (rmd->contiguous)
		(*rmd->mfn)++;
	else
		rmd->mfn++;

	rmd->mmu_update->ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
	rmd->mmu_update->val = pte_val_ma(pte);
	rmd->mmu_update++;

	return 0;
}

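/*
 * Map @nr foreign frames into @vma at @addr, in chunks of
 * REMAP_BATCH_SIZE.  With a NULL @err_ptr, @gfn names the start of a
 * contiguous frame range; otherwise @gfn is an array of frames and a
 * per-frame status is written back through @err_ptr.  Returns the
 * number of frames successfully mapped, or a negative errno.
 */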
static int do_remap_gfn(struct vm_area_struct *vma,
			unsigned long addr,
			xen_pfn_t *gfn, int nr,
			int *err_ptr, pgprot_t prot,
			unsigned domid,
			struct page **pages)
{
	int err = 0;
	struct remap_data rmd;
	struct mmu_update mmu_update[REMAP_BATCH_SIZE];
	unsigned long range;
	int mapped = 0;

	BUG_ON((vma->vm_flags & (VM_PFNMAP | VM_IO)) != (VM_PFNMAP | VM_IO));

	rmd.mfn = gfn;
	rmd.prot = prot;
	/*
	 * We use err_ptr to indicate whether we are doing a contiguous
	 * mapping or a discontiguous one.
	 */
	rmd.contiguous = !err_ptr;

	while (nr) {
		int index = 0;
		int done = 0;
		int batch = min(REMAP_BATCH_SIZE, nr);
		int batch_left = batch;

		range = (unsigned long)batch << PAGE_SHIFT;

		rmd.mmu_update = mmu_update;
		err = apply_to_page_range(vma->vm_mm, addr, range,
					  remap_area_mfn_pte_fn, &rmd);
		if (err)
			goto out;

		/*
		 * Record the error for each page that fails, but keep
		 * mapping until the whole set has been tried.
		 */
		do {
			int i;

			err = HYPERVISOR_mmu_update(&mmu_update[index],
						    batch_left, &done, domid);

			/*
			 * @err_ptr may be the same buffer as @gfn, so
			 * only clear it after each chunk of @gfn is
			 * used.
			 */
			if (err_ptr) {
				for (i = index; i < index + done; i++)
					err_ptr[i] = 0;
			}
			if (err < 0) {
				if (!err_ptr)
					goto out;
				err_ptr[i] = err;
				done++; /* Skip failed frame. */
			} else
				mapped += done;
			batch_left -= done;
			index += done;
		} while (batch_left);

		nr -= batch;
		addr += range;
		if (err_ptr)
			err_ptr += batch;
		cond_resched();
	}
out:

	xen_flush_tlb_all();

	return err < 0 ? err : mapped;
}

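/*
 * Map a contiguous range of foreign frames.  Only meaningful for PV
 * guests: auto-translated (XENFEAT_auto_translated_physmap) domains are
 * rejected with -EOPNOTSUPP.
 */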
int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t gfn, int nr,
			       pgprot_t prot, unsigned domid,
			       struct page **pages)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -EOPNOTSUPP;

	return do_remap_gfn(vma, addr, &gfn, nr, NULL, prot, domid, pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);

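/*
 * Map an array of foreign frames, reporting a per-frame status through
 * @err_ptr.  Auto-translated domains are handed off to
 * xen_xlate_remap_gfn_array(); PV domains go through do_remap_gfn().
 */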
int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t *gfn, int nr,
			       int *err_ptr, pgprot_t prot,
			       unsigned domid, struct page **pages)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
						 prot, domid, pages);

	/*
	 * We BUG_ON because it's a programmer error to pass a NULL
	 * err_ptr, and it would otherwise be very hard to track down
	 * later why the wrong memory was mapped in.
	 */
	BUG_ON(err_ptr == NULL);
	return do_remap_gfn(vma, addr, gfn, nr, err_ptr, prot, domid, pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);

/*
 * Returns: 0 on success, or -EINVAL if @pages is non-NULL for a PV
 * domain, where page-based unmapping does not apply.
 */
int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
			       int nr, struct page **pages)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return xen_xlate_unmap_gfn_range(vma, nr, pages);

	if (!pages)
		return 0;

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);