blob: 5063c5e796b7f1a27849f191ba4b11940c3f2960 [file] [log] [blame]
David Vrabel628c28e2015-03-11 14:49:56 +00001/*
2 * MMU operations common to all auto-translated physmap guests.
3 *
4 * Copyright (C) 2015 Citrix Systems R&D Ltd.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation; or, when distributed
9 * separately from the Linux kernel or incorporated into other
10 * software packages, subject to the following license:
11 *
12 * Permission is hereby granted, free of charge, to any person obtaining a copy
13 * of this source file (the "Software"), to deal in the Software without
14 * restriction, including without limitation the rights to use, copy, modify,
15 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
16 * and to permit persons to whom the Software is furnished to do so, subject to
17 * the following conditions:
18 *
19 * The above copyright notice and this permission notice shall be included in
20 * all copies or substantial portions of the Software.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
27 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
28 * IN THE SOFTWARE.
29 */
30#include <linux/kernel.h>
31#include <linux/mm.h>
32
33#include <asm/xen/hypercall.h>
34#include <asm/xen/hypervisor.h>
35
36#include <xen/xen.h>
37#include <xen/page.h>
38#include <xen/interface/xen.h>
39#include <xen/interface/memory.h>
40
Julien Grall5995a682015-05-05 16:54:12 +010041typedef void (*xen_gfn_fn_t)(unsigned long gfn, void *data);
42
43/* Break down the pages in 4KB chunk and call fn for each gfn */
44static void xen_for_each_gfn(struct page **pages, unsigned nr_gfn,
45 xen_gfn_fn_t fn, void *data)
David Vrabel628c28e2015-03-11 14:49:56 +000046{
Julien Grall5995a682015-05-05 16:54:12 +010047 unsigned long xen_pfn = 0;
48 struct page *page;
49 int i;
David Vrabel628c28e2015-03-11 14:49:56 +000050
Julien Grall5995a682015-05-05 16:54:12 +010051 for (i = 0; i < nr_gfn; i++) {
52 if ((i % XEN_PFN_PER_PAGE) == 0) {
53 page = pages[i / XEN_PFN_PER_PAGE];
54 xen_pfn = page_to_xen_pfn(page);
55 }
56 fn(pfn_to_gfn(xen_pfn++), data);
57 }
David Vrabel628c28e2015-03-11 14:49:56 +000058}
59
/*
 * State threaded through remap_pte_fn() while mapping a batch of
 * foreign frames into a VMA.
 */
struct remap_data {
	xen_pfn_t *fgfn; /* foreign domain's gfn */
	int nr_fgfn; /* Number of foreign gfn left to map */
	pgprot_t prot;			/* protection for the new PTEs */
	domid_t domid;			/* foreign domain owning the frames */
	struct vm_area_struct *vma;	/* destination VMA */
	int index;			/* next entry of pages[] to consume */
	struct page **pages;		/* local pages backing the mapping */
	struct xen_remap_gfn_info *info;
	int *err_ptr;			/* one error status per Xen PFN */
	int mapped;			/* count of successfully mapped gfns */

	/* Hypercall parameters (scratch space reused per batch) */
	int h_errs[XEN_PFN_PER_PAGE];
	xen_ulong_t h_idxs[XEN_PFN_PER_PAGE];
	xen_pfn_t h_gpfns[XEN_PFN_PER_PAGE];

	int h_iter; /* Iterator */
};
79
Julien Grall5995a682015-05-05 16:54:12 +010080static void setup_hparams(unsigned long gfn, void *data)
81{
82 struct remap_data *info = data;
83
84 info->h_idxs[info->h_iter] = *info->fgfn;
85 info->h_gpfns[info->h_iter] = gfn;
86 info->h_errs[info->h_iter] = 0;
87
88 info->h_iter++;
89 info->fgfn++;
90}
91
/*
 * apply_to_page_range() callback: populate one PTE worth of mapping.
 *
 * Batches up to XEN_PFN_PER_PAGE foreign gfns into a single
 * XENMEM_add_to_physmap_range hypercall, stores one error status per
 * Xen PFN through info->err_ptr, and installs the PTE when the
 * hypercall itself succeeded.  Always returns 0 so the page-range walk
 * continues; per-frame failures are reported via err_ptr instead.
 */
static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
			void *data)
{
	struct remap_data *info = data;
	struct page *page = info->pages[info->index++];
	/* Special PTE: no normal struct page semantics for this mapping. */
	pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), info->prot));
	int rc, nr_gfn;
	uint32_t i;
	struct xen_add_to_physmap_range xatp = {
		.domid = DOMID_SELF,
		.foreign_domid = info->domid,
		.space = XENMAPSPACE_gmfn_foreign,
	};

	/* Map at most one kernel page worth of 4KB granules per call. */
	nr_gfn = min_t(typeof(info->nr_fgfn), XEN_PFN_PER_PAGE, info->nr_fgfn);
	info->nr_fgfn -= nr_gfn;

	info->h_iter = 0;
	xen_for_each_gfn(&page, nr_gfn, setup_hparams, info);
	BUG_ON(info->h_iter != nr_gfn);

	set_xen_guest_handle(xatp.idxs, info->h_idxs);
	set_xen_guest_handle(xatp.gpfns, info->h_gpfns);
	set_xen_guest_handle(xatp.errs, info->h_errs);
	xatp.size = nr_gfn;

	rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);

	/* info->err_ptr expects one error status per Xen PFN.  A negative
	 * hypercall return overrides the per-frame status. */
	for (i = 0; i < nr_gfn; i++) {
		int err = (rc < 0) ? rc : info->h_errs[i];

		*(info->err_ptr++) = err;
		if (!err)
			info->mapped++;
	}

	/*
	 * Note: the hypercall usually returns 0 even if some (or all) of
	 * the individual frames failed to map; per-frame status is in
	 * h_errs.  Still install the PTE as userspace may decide to
	 * continue with the frames that did map.
	 */
	if (!rc)
		set_pte_at(info->vma->vm_mm, addr, ptep, pte);

	return 0;
}
139
David Vrabel4e8c0c82015-03-11 14:49:57 +0000140int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
David Vrabel628c28e2015-03-11 14:49:56 +0000141 unsigned long addr,
Julien Gralla13d7202015-08-07 17:34:41 +0100142 xen_pfn_t *gfn, int nr,
David Vrabel4e8c0c82015-03-11 14:49:57 +0000143 int *err_ptr, pgprot_t prot,
144 unsigned domid,
David Vrabel628c28e2015-03-11 14:49:56 +0000145 struct page **pages)
146{
147 int err;
148 struct remap_data data;
Julien Grall5995a682015-05-05 16:54:12 +0100149 unsigned long range = DIV_ROUND_UP(nr, XEN_PFN_PER_PAGE) << PAGE_SHIFT;
David Vrabel628c28e2015-03-11 14:49:56 +0000150
David Vrabel4e8c0c82015-03-11 14:49:57 +0000151 /* Kept here for the purpose of making sure code doesn't break
152 x86 PVOPS */
153 BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
David Vrabel628c28e2015-03-11 14:49:56 +0000154
Julien Gralla13d7202015-08-07 17:34:41 +0100155 data.fgfn = gfn;
Julien Grall5995a682015-05-05 16:54:12 +0100156 data.nr_fgfn = nr;
David Vrabel4e8c0c82015-03-11 14:49:57 +0000157 data.prot = prot;
David Vrabel628c28e2015-03-11 14:49:56 +0000158 data.domid = domid;
David Vrabel4e8c0c82015-03-11 14:49:57 +0000159 data.vma = vma;
David Vrabel628c28e2015-03-11 14:49:56 +0000160 data.pages = pages;
David Vrabel4e8c0c82015-03-11 14:49:57 +0000161 data.index = 0;
162 data.err_ptr = err_ptr;
163 data.mapped = 0;
164
165 err = apply_to_page_range(vma->vm_mm, addr, range,
David Vrabel628c28e2015-03-11 14:49:56 +0000166 remap_pte_fn, &data);
David Vrabel4e8c0c82015-03-11 14:49:57 +0000167 return err < 0 ? err : data.mapped;
David Vrabel628c28e2015-03-11 14:49:56 +0000168}
David Vrabel4e8c0c82015-03-11 14:49:57 +0000169EXPORT_SYMBOL_GPL(xen_xlate_remap_gfn_array);
David Vrabel628c28e2015-03-11 14:49:56 +0000170
Julien Grall5995a682015-05-05 16:54:12 +0100171static void unmap_gfn(unsigned long gfn, void *data)
172{
173 struct xen_remove_from_physmap xrp;
174
175 xrp.domid = DOMID_SELF;
176 xrp.gpfn = gfn;
177 (void)HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
178}
179
David Vrabel628c28e2015-03-11 14:49:56 +0000180int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
181 int nr, struct page **pages)
182{
Julien Grall5995a682015-05-05 16:54:12 +0100183 xen_for_each_gfn(pages, nr, unmap_gfn, NULL);
David Vrabel628c28e2015-03-11 14:49:56 +0000184
David Vrabel628c28e2015-03-11 14:49:56 +0000185 return 0;
186}
187EXPORT_SYMBOL_GPL(xen_xlate_unmap_gfn_range);