David Vrabel | 628c28e | 2015-03-11 14:49:56 +0000 | [diff] [blame] | 1 | /* |
| 2 | * MMU operations common to all auto-translated physmap guests. |
| 3 | * |
| 4 | * Copyright (C) 2015 Citrix Systems R&D Ltd. |
| 5 | * |
| 6 | * This program is free software; you can redistribute it and/or |
| 7 | * modify it under the terms of the GNU General Public License version 2 |
| 8 | * as published by the Free Software Foundation; or, when distributed |
| 9 | * separately from the Linux kernel or incorporated into other |
| 10 | * software packages, subject to the following license: |
| 11 | * |
| 12 | * Permission is hereby granted, free of charge, to any person obtaining a copy |
| 13 | * of this source file (the "Software"), to deal in the Software without |
| 14 | * restriction, including without limitation the rights to use, copy, modify, |
| 15 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, |
| 16 | * and to permit persons to whom the Software is furnished to do so, subject to |
| 17 | * the following conditions: |
| 18 | * |
| 19 | * The above copyright notice and this permission notice shall be included in |
| 20 | * all copies or substantial portions of the Software. |
| 21 | * |
| 22 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 23 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 24 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
| 25 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
| 26 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
| 27 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
| 28 | * IN THE SOFTWARE. |
| 29 | */ |
| 30 | #include <linux/kernel.h> |
| 31 | #include <linux/mm.h> |
Shannon Zhao | 243848f | 2016-04-07 20:03:19 +0800 | [diff] [blame] | 32 | #include <linux/slab.h> |
| 33 | #include <linux/vmalloc.h> |
David Vrabel | 628c28e | 2015-03-11 14:49:56 +0000 | [diff] [blame] | 34 | |
| 35 | #include <asm/xen/hypercall.h> |
| 36 | #include <asm/xen/hypervisor.h> |
| 37 | |
| 38 | #include <xen/xen.h> |
Srikanth Boddepalli | c5ca49a | 2018-11-27 19:53:27 +0530 | [diff] [blame] | 39 | #include <xen/xen-ops.h> |
David Vrabel | 628c28e | 2015-03-11 14:49:56 +0000 | [diff] [blame] | 40 | #include <xen/page.h> |
| 41 | #include <xen/interface/xen.h> |
| 42 | #include <xen/interface/memory.h> |
Shannon Zhao | 243848f | 2016-04-07 20:03:19 +0800 | [diff] [blame] | 43 | #include <xen/balloon.h> |
David Vrabel | 628c28e | 2015-03-11 14:49:56 +0000 | [diff] [blame] | 44 | |
Julien Grall | 5995a68 | 2015-05-05 16:54:12 +0100 | [diff] [blame] | 45 | typedef void (*xen_gfn_fn_t)(unsigned long gfn, void *data); |
| 46 | |
| 47 | /* Break down the pages in 4KB chunk and call fn for each gfn */ |
| 48 | static void xen_for_each_gfn(struct page **pages, unsigned nr_gfn, |
| 49 | xen_gfn_fn_t fn, void *data) |
David Vrabel | 628c28e | 2015-03-11 14:49:56 +0000 | [diff] [blame] | 50 | { |
Julien Grall | 5995a68 | 2015-05-05 16:54:12 +0100 | [diff] [blame] | 51 | unsigned long xen_pfn = 0; |
| 52 | struct page *page; |
| 53 | int i; |
David Vrabel | 628c28e | 2015-03-11 14:49:56 +0000 | [diff] [blame] | 54 | |
Julien Grall | 5995a68 | 2015-05-05 16:54:12 +0100 | [diff] [blame] | 55 | for (i = 0; i < nr_gfn; i++) { |
| 56 | if ((i % XEN_PFN_PER_PAGE) == 0) { |
| 57 | page = pages[i / XEN_PFN_PER_PAGE]; |
| 58 | xen_pfn = page_to_xen_pfn(page); |
| 59 | } |
| 60 | fn(pfn_to_gfn(xen_pfn++), data); |
| 61 | } |
David Vrabel | 628c28e | 2015-03-11 14:49:56 +0000 | [diff] [blame] | 62 | } |
| 63 | |
/*
 * State shared between xen_xlate_remap_gfn_array() and the per-PTE
 * callback remap_pte_fn(), including the staging buffers for one
 * XENMEM_add_to_physmap_range hypercall batch.
 */
struct remap_data {
	xen_pfn_t *fgfn; /* foreign domain's gfn */
	int nr_fgfn; /* Number of foreign gfn left to map */
	pgprot_t prot;			/* page protection for the new PTEs */
	domid_t domid;			/* foreign domain being mapped */
	struct vm_area_struct *vma;	/* destination VMA */
	int index;			/* next entry of @pages to consume */
	struct page **pages;		/* local pages backing the mapping */
	struct xen_remap_gfn_info *info;
	int *err_ptr;			/* one error status per Xen PFN */
	int mapped;			/* running count of successful maps */

	/* Hypercall parameters (at most one CPU page per batch) */
	int h_errs[XEN_PFN_PER_PAGE];
	xen_ulong_t h_idxs[XEN_PFN_PER_PAGE];
	xen_pfn_t h_gpfns[XEN_PFN_PER_PAGE];

	int h_iter; /* Iterator */
};
| 83 | |
Julien Grall | 5995a68 | 2015-05-05 16:54:12 +0100 | [diff] [blame] | 84 | static void setup_hparams(unsigned long gfn, void *data) |
| 85 | { |
| 86 | struct remap_data *info = data; |
| 87 | |
| 88 | info->h_idxs[info->h_iter] = *info->fgfn; |
| 89 | info->h_gpfns[info->h_iter] = gfn; |
| 90 | info->h_errs[info->h_iter] = 0; |
| 91 | |
| 92 | info->h_iter++; |
| 93 | info->fgfn++; |
| 94 | } |
| 95 | |
David Vrabel | 628c28e | 2015-03-11 14:49:56 +0000 | [diff] [blame] | 96 | static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr, |
| 97 | void *data) |
| 98 | { |
| 99 | struct remap_data *info = data; |
| 100 | struct page *page = info->pages[info->index++]; |
Julien Grall | 5995a68 | 2015-05-05 16:54:12 +0100 | [diff] [blame] | 101 | pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), info->prot)); |
| 102 | int rc, nr_gfn; |
| 103 | uint32_t i; |
| 104 | struct xen_add_to_physmap_range xatp = { |
| 105 | .domid = DOMID_SELF, |
| 106 | .foreign_domid = info->domid, |
| 107 | .space = XENMAPSPACE_gmfn_foreign, |
| 108 | }; |
David Vrabel | 628c28e | 2015-03-11 14:49:56 +0000 | [diff] [blame] | 109 | |
Julien Grall | 5995a68 | 2015-05-05 16:54:12 +0100 | [diff] [blame] | 110 | nr_gfn = min_t(typeof(info->nr_fgfn), XEN_PFN_PER_PAGE, info->nr_fgfn); |
| 111 | info->nr_fgfn -= nr_gfn; |
| 112 | |
| 113 | info->h_iter = 0; |
| 114 | xen_for_each_gfn(&page, nr_gfn, setup_hparams, info); |
| 115 | BUG_ON(info->h_iter != nr_gfn); |
| 116 | |
| 117 | set_xen_guest_handle(xatp.idxs, info->h_idxs); |
| 118 | set_xen_guest_handle(xatp.gpfns, info->h_gpfns); |
| 119 | set_xen_guest_handle(xatp.errs, info->h_errs); |
| 120 | xatp.size = nr_gfn; |
| 121 | |
| 122 | rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp); |
| 123 | |
| 124 | /* info->err_ptr expect to have one error status per Xen PFN */ |
| 125 | for (i = 0; i < nr_gfn; i++) { |
| 126 | int err = (rc < 0) ? rc : info->h_errs[i]; |
| 127 | |
| 128 | *(info->err_ptr++) = err; |
| 129 | if (!err) |
| 130 | info->mapped++; |
David Vrabel | 4e8c0c8 | 2015-03-11 14:49:57 +0000 | [diff] [blame] | 131 | } |
Julien Grall | 5995a68 | 2015-05-05 16:54:12 +0100 | [diff] [blame] | 132 | |
| 133 | /* |
| 134 | * Note: The hypercall will return 0 in most of the case if even if |
| 135 | * all the fgmfn are not mapped. We still have to update the pte |
| 136 | * as the userspace may decide to continue. |
| 137 | */ |
| 138 | if (!rc) |
| 139 | set_pte_at(info->vma->vm_mm, addr, ptep, pte); |
David Vrabel | 628c28e | 2015-03-11 14:49:56 +0000 | [diff] [blame] | 140 | |
| 141 | return 0; |
| 142 | } |
| 143 | |
David Vrabel | 4e8c0c8 | 2015-03-11 14:49:57 +0000 | [diff] [blame] | 144 | int xen_xlate_remap_gfn_array(struct vm_area_struct *vma, |
David Vrabel | 628c28e | 2015-03-11 14:49:56 +0000 | [diff] [blame] | 145 | unsigned long addr, |
Julien Grall | a13d720 | 2015-08-07 17:34:41 +0100 | [diff] [blame] | 146 | xen_pfn_t *gfn, int nr, |
David Vrabel | 4e8c0c8 | 2015-03-11 14:49:57 +0000 | [diff] [blame] | 147 | int *err_ptr, pgprot_t prot, |
| 148 | unsigned domid, |
David Vrabel | 628c28e | 2015-03-11 14:49:56 +0000 | [diff] [blame] | 149 | struct page **pages) |
| 150 | { |
| 151 | int err; |
| 152 | struct remap_data data; |
Julien Grall | 5995a68 | 2015-05-05 16:54:12 +0100 | [diff] [blame] | 153 | unsigned long range = DIV_ROUND_UP(nr, XEN_PFN_PER_PAGE) << PAGE_SHIFT; |
David Vrabel | 628c28e | 2015-03-11 14:49:56 +0000 | [diff] [blame] | 154 | |
David Vrabel | 4e8c0c8 | 2015-03-11 14:49:57 +0000 | [diff] [blame] | 155 | /* Kept here for the purpose of making sure code doesn't break |
| 156 | x86 PVOPS */ |
| 157 | BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO))); |
David Vrabel | 628c28e | 2015-03-11 14:49:56 +0000 | [diff] [blame] | 158 | |
Julien Grall | a13d720 | 2015-08-07 17:34:41 +0100 | [diff] [blame] | 159 | data.fgfn = gfn; |
Julien Grall | 5995a68 | 2015-05-05 16:54:12 +0100 | [diff] [blame] | 160 | data.nr_fgfn = nr; |
David Vrabel | 4e8c0c8 | 2015-03-11 14:49:57 +0000 | [diff] [blame] | 161 | data.prot = prot; |
David Vrabel | 628c28e | 2015-03-11 14:49:56 +0000 | [diff] [blame] | 162 | data.domid = domid; |
David Vrabel | 4e8c0c8 | 2015-03-11 14:49:57 +0000 | [diff] [blame] | 163 | data.vma = vma; |
David Vrabel | 628c28e | 2015-03-11 14:49:56 +0000 | [diff] [blame] | 164 | data.pages = pages; |
David Vrabel | 4e8c0c8 | 2015-03-11 14:49:57 +0000 | [diff] [blame] | 165 | data.index = 0; |
| 166 | data.err_ptr = err_ptr; |
| 167 | data.mapped = 0; |
| 168 | |
| 169 | err = apply_to_page_range(vma->vm_mm, addr, range, |
David Vrabel | 628c28e | 2015-03-11 14:49:56 +0000 | [diff] [blame] | 170 | remap_pte_fn, &data); |
David Vrabel | 4e8c0c8 | 2015-03-11 14:49:57 +0000 | [diff] [blame] | 171 | return err < 0 ? err : data.mapped; |
David Vrabel | 628c28e | 2015-03-11 14:49:56 +0000 | [diff] [blame] | 172 | } |
David Vrabel | 4e8c0c8 | 2015-03-11 14:49:57 +0000 | [diff] [blame] | 173 | EXPORT_SYMBOL_GPL(xen_xlate_remap_gfn_array); |
David Vrabel | 628c28e | 2015-03-11 14:49:56 +0000 | [diff] [blame] | 174 | |
Julien Grall | 5995a68 | 2015-05-05 16:54:12 +0100 | [diff] [blame] | 175 | static void unmap_gfn(unsigned long gfn, void *data) |
| 176 | { |
| 177 | struct xen_remove_from_physmap xrp; |
| 178 | |
| 179 | xrp.domid = DOMID_SELF; |
| 180 | xrp.gpfn = gfn; |
| 181 | (void)HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp); |
| 182 | } |
| 183 | |
David Vrabel | 628c28e | 2015-03-11 14:49:56 +0000 | [diff] [blame] | 184 | int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma, |
| 185 | int nr, struct page **pages) |
| 186 | { |
Julien Grall | 5995a68 | 2015-05-05 16:54:12 +0100 | [diff] [blame] | 187 | xen_for_each_gfn(pages, nr, unmap_gfn, NULL); |
David Vrabel | 628c28e | 2015-03-11 14:49:56 +0000 | [diff] [blame] | 188 | |
David Vrabel | 628c28e | 2015-03-11 14:49:56 +0000 | [diff] [blame] | 189 | return 0; |
| 190 | } |
| 191 | EXPORT_SYMBOL_GPL(xen_xlate_unmap_gfn_range); |
Shannon Zhao | 243848f | 2016-04-07 20:03:19 +0800 | [diff] [blame] | 192 | |
Shannon Zhao | 975fac3c | 2016-04-07 20:03:20 +0800 | [diff] [blame] | 193 | struct map_balloon_pages { |
| 194 | xen_pfn_t *pfns; |
| 195 | unsigned int idx; |
| 196 | }; |
| 197 | |
| 198 | static void setup_balloon_gfn(unsigned long gfn, void *data) |
| 199 | { |
| 200 | struct map_balloon_pages *info = data; |
| 201 | |
| 202 | info->pfns[info->idx++] = gfn; |
| 203 | } |
| 204 | |
Shannon Zhao | 243848f | 2016-04-07 20:03:19 +0800 | [diff] [blame] | 205 | /** |
| 206 | * xen_xlate_map_ballooned_pages - map a new set of ballooned pages |
| 207 | * @gfns: returns the array of corresponding GFNs |
| 208 | * @virt: returns the virtual address of the mapped region |
| 209 | * @nr_grant_frames: number of GFNs |
| 210 | * @return 0 on success, error otherwise |
| 211 | * |
| 212 | * This allocates a set of ballooned pages and maps them into the |
| 213 | * kernel's address space. |
| 214 | */ |
| 215 | int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt, |
| 216 | unsigned long nr_grant_frames) |
| 217 | { |
| 218 | struct page **pages; |
| 219 | xen_pfn_t *pfns; |
| 220 | void *vaddr; |
Shannon Zhao | 975fac3c | 2016-04-07 20:03:20 +0800 | [diff] [blame] | 221 | struct map_balloon_pages data; |
Shannon Zhao | 243848f | 2016-04-07 20:03:19 +0800 | [diff] [blame] | 222 | int rc; |
Shannon Zhao | 975fac3c | 2016-04-07 20:03:20 +0800 | [diff] [blame] | 223 | unsigned long nr_pages; |
Shannon Zhao | 243848f | 2016-04-07 20:03:19 +0800 | [diff] [blame] | 224 | |
| 225 | BUG_ON(nr_grant_frames == 0); |
Shannon Zhao | 975fac3c | 2016-04-07 20:03:20 +0800 | [diff] [blame] | 226 | nr_pages = DIV_ROUND_UP(nr_grant_frames, XEN_PFN_PER_PAGE); |
| 227 | pages = kcalloc(nr_pages, sizeof(pages[0]), GFP_KERNEL); |
Shannon Zhao | 243848f | 2016-04-07 20:03:19 +0800 | [diff] [blame] | 228 | if (!pages) |
| 229 | return -ENOMEM; |
| 230 | |
| 231 | pfns = kcalloc(nr_grant_frames, sizeof(pfns[0]), GFP_KERNEL); |
| 232 | if (!pfns) { |
| 233 | kfree(pages); |
| 234 | return -ENOMEM; |
| 235 | } |
Shannon Zhao | 975fac3c | 2016-04-07 20:03:20 +0800 | [diff] [blame] | 236 | rc = alloc_xenballooned_pages(nr_pages, pages); |
Shannon Zhao | 243848f | 2016-04-07 20:03:19 +0800 | [diff] [blame] | 237 | if (rc) { |
Shannon Zhao | 975fac3c | 2016-04-07 20:03:20 +0800 | [diff] [blame] | 238 | pr_warn("%s Couldn't balloon alloc %ld pages rc:%d\n", __func__, |
| 239 | nr_pages, rc); |
Shannon Zhao | 243848f | 2016-04-07 20:03:19 +0800 | [diff] [blame] | 240 | kfree(pages); |
| 241 | kfree(pfns); |
| 242 | return rc; |
| 243 | } |
Shannon Zhao | 243848f | 2016-04-07 20:03:19 +0800 | [diff] [blame] | 244 | |
Shannon Zhao | 975fac3c | 2016-04-07 20:03:20 +0800 | [diff] [blame] | 245 | data.pfns = pfns; |
| 246 | data.idx = 0; |
| 247 | xen_for_each_gfn(pages, nr_grant_frames, setup_balloon_gfn, &data); |
| 248 | |
| 249 | vaddr = vmap(pages, nr_pages, 0, PAGE_KERNEL); |
Shannon Zhao | 243848f | 2016-04-07 20:03:19 +0800 | [diff] [blame] | 250 | if (!vaddr) { |
Shannon Zhao | 975fac3c | 2016-04-07 20:03:20 +0800 | [diff] [blame] | 251 | pr_warn("%s Couldn't map %ld pages rc:%d\n", __func__, |
| 252 | nr_pages, rc); |
| 253 | free_xenballooned_pages(nr_pages, pages); |
Shannon Zhao | 243848f | 2016-04-07 20:03:19 +0800 | [diff] [blame] | 254 | kfree(pages); |
| 255 | kfree(pfns); |
| 256 | return -ENOMEM; |
| 257 | } |
| 258 | kfree(pages); |
| 259 | |
| 260 | *gfns = pfns; |
| 261 | *virt = vaddr; |
| 262 | |
| 263 | return 0; |
| 264 | } |
| 265 | EXPORT_SYMBOL_GPL(xen_xlate_map_ballooned_pages); |