/******************************************************************************
 * privcmd.c
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/balloon.h>

#include "privcmd.h"

MODULE_LICENSE("GPL");

#define PRIV_VMA_LOCKED ((void *)1)

static int privcmd_vma_range_is_mapped(
	       struct vm_area_struct *vma,
	       unsigned long addr,
	       unsigned long nr_pages);

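/*
 * IOCTL_PRIVCMD_HYPERCALL: forward an arbitrary hypercall from a
 * sufficiently privileged userspace process (typically the Xen toolstack)
 * to the hypervisor.  As an illustrative sketch only (not part of this
 * file), userspace is expected to use it roughly like this:
 *
 *	struct privcmd_hypercall hc = {
 *		.op  = __HYPERVISOR_xen_version,
 *		.arg = { XENVER_version, 0, 0, 0, 0 },
 *	};
 *	int fd = open("/dev/xen/privcmd", O_RDWR);
 *	long ver = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &hc);
 *
 * The hypercall's return value is returned directly from the ioctl.
 */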
static long privcmd_ioctl_hypercall(void __user *udata)
{
	struct privcmd_hypercall hypercall;
	long ret;

	if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
		return -EFAULT;

	xen_preemptible_hcall_begin();
	ret = privcmd_call(hypercall.op,
			   hypercall.arg[0], hypercall.arg[1],
			   hypercall.arg[2], hypercall.arg[3],
			   hypercall.arg[4]);
	xen_preemptible_hcall_end();

	return ret;
}

static void free_page_list(struct list_head *pages)
{
	struct page *p, *n;

	list_for_each_entry_safe(p, n, pages, lru)
		__free_page(p);

	INIT_LIST_HEAD(pages);
}

/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
			unsigned nelem, size_t size,
			const void __user *data)
{
	unsigned pageidx;
	void *pagedata;
	int ret;

	if (size > PAGE_SIZE)
		return 0;

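	/*
	 * pageidx deliberately starts past the end of a page so that the
	 * first loop iteration allocates the initial page; thereafter a new
	 * page is allocated only when the next element would not fit in the
	 * remainder of the current one.
	 */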
	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* quiet, gcc */
	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page = alloc_page(GFP_KERNEL);

			ret = -ENOMEM;
			if (page == NULL)
				goto fail;

			pagedata = page_address(page);

			list_add_tail(&page->lru, pagelist);
			pageidx = 0;
		}

		ret = -EFAULT;
		if (copy_from_user(pagedata + pageidx, data, size))
			goto fail;

		data += size;
		pageidx += size;
	}

	ret = 0;

fail:
	return ret;
}

/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
			  struct list_head *pos,
			  int (*fn)(void *data, void *state),
			  void *state)
{
	void *pagedata;
	unsigned pageidx;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* hush, gcc */

	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page;
			pos = pos->next;
			page = list_entry(pos, struct page, lru);
			pagedata = page_address(page);
			pageidx = 0;
		}

		ret = (*fn)(pagedata + pageidx, state);
		if (ret)
			break;
		pageidx += size;
	}

	return ret;
}

/*
 * Similar to traverse_pages, but use each page as a "block" of
 * data to be processed as one unit.
 */
static int traverse_pages_block(unsigned nelem, size_t size,
				struct list_head *pos,
				int (*fn)(void *data, int nr, void *state),
				void *state)
{
	void *pagedata;
	unsigned pageidx;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	pageidx = PAGE_SIZE;

	while (nelem) {
		int nr = (PAGE_SIZE/size);
		struct page *page;
		if (nr > nelem)
			nr = nelem;
		pos = pos->next;
		page = list_entry(pos, struct page, lru);
		pagedata = page_address(page);
		ret = (*fn)(pagedata, nr, state);
		if (ret)
			break;
		nelem -= nr;
	}

	return ret;
}

struct mmap_gfn_state {
	unsigned long va;
	struct vm_area_struct *vma;
	domid_t domain;
};

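/*
 * Map one privcmd_mmap_entry worth of foreign frames.  Each entry must
 * start exactly where the previous one ended (st->va) and must lie wholly
 * within the VMA; the actual mapping is delegated to
 * xen_remap_domain_gfn_range().
 */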
static int mmap_gfn_range(void *data, void *state)
{
	struct privcmd_mmap_entry *msg = data;
	struct mmap_gfn_state *st = state;
	struct vm_area_struct *vma = st->vma;
	int rc;

	/* Do not allow range to wrap the address space. */
	if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
	    ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
		return -EINVAL;

	/* Range chunks must be contiguous in va space. */
	if ((msg->va != st->va) ||
	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
		return -EINVAL;

	rc = xen_remap_domain_gfn_range(vma,
					msg->va & PAGE_MASK,
					msg->mfn, msg->npages,
					vma->vm_page_prot,
					st->domain, NULL);
	if (rc < 0)
		return rc;

	st->va += msg->npages << PAGE_SHIFT;

	return 0;
}

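/*
 * IOCTL_PRIVCMD_MMAP: map ranges of foreign frames at the virtual
 * addresses recorded in an array of struct privcmd_mmap_entry.  The VMA
 * must have been created by mmap()ing this device first.  A rough,
 * illustrative sketch of the expected userspace usage (field values are
 * made up):
 *
 *	void *addr = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *	struct privcmd_mmap_entry ent = {
 *		.va = (unsigned long)addr,
 *		.mfn = foreign_mfn,
 *		.npages = 1,
 *	};
 *	struct privcmd_mmap cmd = { .num = 1, .dom = domid, .entry = &ent };
 *	ioctl(fd, IOCTL_PRIVCMD_MMAP, &cmd);
 */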
static long privcmd_ioctl_mmap(void __user *udata)
{
	struct privcmd_mmap mmapcmd;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;
	LIST_HEAD(pagelist);
	struct mmap_gfn_state state;

	/* We only support privcmd_ioctl_mmap_batch for auto translated. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -ENOSYS;

	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
		return -EFAULT;

	rc = gather_array(&pagelist,
			  mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			  mmapcmd.entry);

	if (rc || list_empty(&pagelist))
		goto out;

	down_write(&mm->mmap_sem);

	{
		struct page *page = list_first_entry(&pagelist,
						     struct page, lru);
		struct privcmd_mmap_entry *msg = page_address(page);

		vma = find_vma(mm, msg->va);
		rc = -EINVAL;

		if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
			goto out_up;
		vma->vm_private_data = PRIV_VMA_LOCKED;
	}

	state.va = vma->vm_start;
	state.vma = vma;
	state.domain = mmapcmd.dom;

	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			    &pagelist,
			    mmap_gfn_range, &state);


out_up:
	up_write(&mm->mmap_sem);

out:
	free_page_list(&pagelist);

	return rc;
}

struct mmap_batch_state {
	domid_t domain;
	unsigned long va;
	struct vm_area_struct *vma;
	int index;
	/* A tristate:
	 *      0 for no errors
	 *      1 if at least one error has happened (and no
	 *          -ENOENT errors have happened)
	 *      -ENOENT if at least 1 -ENOENT has happened.
	 */
	int global_error;
	int version;

	/* User-space gfn array to store errors in the second pass for V1. */
	xen_pfn_t __user *user_gfn;
	/* User-space int array to store errors in the second pass for V2. */
	int __user *user_err;
};

/* Note for auto-translated dom0: if the domU being created is PV, then the
 * gfn is an mfn (an address on the bus). If the domU is auto-translated,
 * then the gfn is a pfn (the input to HAP).
 */
static int mmap_batch_fn(void *data, int nr, void *state)
{
	xen_pfn_t *gfnp = data;
	struct mmap_batch_state *st = state;
	struct vm_area_struct *vma = st->vma;
	struct page **pages = vma->vm_private_data;
	struct page **cur_pages = NULL;
	int ret;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		cur_pages = &pages[st->index];

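	/*
	 * xen_remap_domain_gfn_array() reports per-frame errors by writing
	 * them back over the gfn array itself (hence the (int *)gfnp
	 * argument below); mmap_return_errors() reads them out in a second
	 * pass.
	 */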
	BUG_ON(nr < 0);
	ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
					 (int *)gfnp, st->vma->vm_page_prot,
					 st->domain, cur_pages);

	/* Adjust the global_error? */
	if (ret != nr) {
		if (ret == -ENOENT)
			st->global_error = -ENOENT;
		else {
			/* Record that at least one error has happened. */
			if (st->global_error == 0)
				st->global_error = 1;
		}
	}
	st->va += PAGE_SIZE * nr;
	st->index += nr;

	return 0;
}

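/*
 * Second pass over the gfn/error array: report per-frame errors back to
 * userspace.  For V1 the error is folded into the top nibble of the gfn in
 * the caller's array; for V2 it is written to the separate err array.
 */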
static int mmap_return_error(int err, struct mmap_batch_state *st)
{
	int ret;

	if (st->version == 1) {
		if (err) {
			xen_pfn_t gfn;

			ret = get_user(gfn, st->user_gfn);
			if (ret < 0)
				return ret;
			/*
			 * V1 encodes the error codes in the 32bit top
			 * nibble of the gfn (with its known
			 * limitations vis-a-vis 64 bit callers).
			 */
			gfn |= (err == -ENOENT) ?
				PRIVCMD_MMAPBATCH_PAGED_ERROR :
				PRIVCMD_MMAPBATCH_MFN_ERROR;
			return __put_user(gfn, st->user_gfn++);
		} else
			st->user_gfn++;
	} else { /* st->version == 2 */
		if (err)
			return __put_user(err, st->user_err++);
		else
			st->user_err++;
	}

	return 0;
}

static int mmap_return_errors(void *data, int nr, void *state)
{
	struct mmap_batch_state *st = state;
	int *errs = data;
	int i;
	int ret;

	for (i = 0; i < nr; i++) {
		ret = mmap_return_error(errs[i], st);
		if (ret < 0)
			return ret;
	}
	return 0;
}

/* Allocate pfns that are then mapped with gfns from foreign domid. Update
 * the vma with the page info to use later.
 * Returns: 0 if success, otherwise -errno
 */
static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
{
	int rc;
	struct page **pages;

	pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
	if (pages == NULL)
		return -ENOMEM;

	rc = alloc_xenballooned_pages(numpgs, pages);
	if (rc != 0) {
		pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
			numpgs, rc);
		kfree(pages);
		return -ENOMEM;
	}
	BUG_ON(vma->vm_private_data != NULL);
	vma->vm_private_data = pages;

	return 0;
}

static const struct vm_operations_struct privcmd_vm_ops;

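/*
 * IOCTL_PRIVCMD_MMAPBATCH / IOCTL_PRIVCMD_MMAPBATCH_V2: map a batch of
 * individual foreign frames into a previously mmap()ed privcmd VMA.  V2
 * returns per-frame errors in a separate array, which lets a caller retry
 * only the frames that failed with -ENOENT (paged-out frames).  A rough,
 * illustrative sketch of V2 usage from userspace (not a complete program;
 * frames[] is filled by the caller with foreign gfns):
 *
 *	xen_pfn_t frames[N];
 *	int errs[N];
 *	struct privcmd_mmapbatch_v2 batch = {
 *		.num  = N,
 *		.dom  = domid,
 *		.addr = (__u64)(unsigned long)addr,
 *		.arr  = frames,
 *		.err  = errs,
 *	};
 *	ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH_V2, &batch);
 */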
static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
{
	int ret;
	struct privcmd_mmapbatch_v2 m;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long nr_pages;
	LIST_HEAD(pagelist);
	struct mmap_batch_state state;

	switch (version) {
	case 1:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
			return -EFAULT;
		/* Returns per-frame error in m.arr. */
		m.err = NULL;
		if (!access_ok(VERIFY_WRITE, m.arr, m.num * sizeof(*m.arr)))
			return -EFAULT;
		break;
	case 2:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
			return -EFAULT;
		/* Returns per-frame error code in m.err. */
		if (!access_ok(VERIFY_WRITE, m.err, m.num * (sizeof(*m.err))))
			return -EFAULT;
		break;
	default:
		return -EINVAL;
	}

	nr_pages = DIV_ROUND_UP(m.num, XEN_PFN_PER_PAGE);
	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
		return -EINVAL;

	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);

	if (ret)
		goto out;
	if (list_empty(&pagelist)) {
		ret = -EINVAL;
		goto out;
	}

	if (version == 2) {
		/* Zero error array now to only copy back actual errors. */
		if (clear_user(m.err, sizeof(int) * m.num)) {
			ret = -EFAULT;
			goto out;
		}
	}

	down_write(&mm->mmap_sem);

	vma = find_vma(mm, m.addr);
	if (!vma ||
	    vma->vm_ops != &privcmd_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Caller must either:
	 *
	 * Map the whole VMA range, which will also allocate all the
	 * pages required for the auto_translated_physmap case.
	 *
	 * Or
	 *
	 * Map unmapped holes left from a previous map attempt (e.g.,
	 * because those foreign frames were previously paged out).
	 */
	if (vma->vm_private_data == NULL) {
		if (m.addr != vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (xen_feature(XENFEAT_auto_translated_physmap)) {
			ret = alloc_empty_pages(vma, nr_pages);
			if (ret < 0)
				goto out_unlock;
		} else
			vma->vm_private_data = PRIV_VMA_LOCKED;
	} else {
		if (m.addr < vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	state.domain = m.dom;
	state.vma = vma;
	state.va = m.addr;
	state.index = 0;
	state.global_error = 0;
	state.version = version;

	BUILD_BUG_ON(((PAGE_SIZE / sizeof(xen_pfn_t)) % XEN_PFN_PER_PAGE) != 0);
	/* mmap_batch_fn guarantees ret == 0 */
	BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
				    &pagelist, mmap_batch_fn, &state));

	up_write(&mm->mmap_sem);

	if (state.global_error) {
		/* Write back errors in second pass. */
		state.user_gfn = (xen_pfn_t *)m.arr;
		state.user_err = m.err;
		ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
					   &pagelist, mmap_return_errors, &state);
	} else
		ret = 0;

	/* If we have not had any EFAULT-like global errors then set the global
	 * error to -ENOENT if necessary. */
	if ((ret == 0) && (state.global_error == -ENOENT))
		ret = -ENOENT;

out:
	free_page_list(&pagelist);
	return ret;

out_unlock:
	up_write(&mm->mmap_sem);
	goto out;
}

static long privcmd_ioctl(struct file *file,
			  unsigned int cmd, unsigned long data)
{
	int ret = -ENOSYS;
	void __user *udata = (void __user *) data;

	switch (cmd) {
	case IOCTL_PRIVCMD_HYPERCALL:
		ret = privcmd_ioctl_hypercall(udata);
		break;

	case IOCTL_PRIVCMD_MMAP:
		ret = privcmd_ioctl_mmap(udata);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH:
		ret = privcmd_ioctl_mmap_batch(udata, 1);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH_V2:
		ret = privcmd_ioctl_mmap_batch(udata, 2);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

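/*
 * VMA close handler: for auto-translated guests the foreign mappings are
 * backed by ballooned-out local pages, so they must be unmapped and the
 * pages returned to the balloon when the VMA goes away.
 */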
static void privcmd_close(struct vm_area_struct *vma)
{
	struct page **pages = vma->vm_private_data;
	int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;
	int rc;

	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
		return;

	rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
	if (rc == 0)
		free_xenballooned_pages(numpgs, pages);
	else
		pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
			numpgs, rc);
	kfree(pages);
}

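/*
 * Mappings in a privcmd VMA are only ever established through the ioctls
 * above, never by demand faulting, so any fault here means userspace
 * touched an address that was never successfully mapped.
 */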
static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
	       vma, vma->vm_start, vma->vm_end,
	       vmf->pgoff, vmf->virtual_address);

	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct privcmd_vm_ops = {
	.close = privcmd_close,
	.fault = privcmd_fault
};

static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
	 * how to recreate these mappings */
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
			 VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &privcmd_vm_ops;
	vma->vm_private_data = NULL;

	return 0;
}

/*
 * For MMAPBATCH*. This allows asserting the singleshot mapping
 * on a per pfn/pte basis. Mapping calls that fail with ENOENT
 * can then be retried until they succeed.
 */
static int is_mapped_fn(pte_t *pte, struct page *pmd_page,
			unsigned long addr, void *data)
{
	return pte_none(*pte) ? 0 : -EBUSY;
}

static int privcmd_vma_range_is_mapped(
	       struct vm_area_struct *vma,
	       unsigned long addr,
	       unsigned long nr_pages)
{
	return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
				   is_mapped_fn, NULL) != 0;
}

const struct file_operations xen_privcmd_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = privcmd_ioctl,
	.mmap = privcmd_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmd_fops);

static struct miscdevice privcmd_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/privcmd",
	.fops = &xen_privcmd_fops,
};

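/* Registered as a misc device; appears to userspace as /dev/xen/privcmd. */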
static int __init privcmd_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	err = misc_register(&privcmd_dev);
	if (err != 0) {
		pr_err("Could not register Xen privcmd device\n");
		return err;
	}
	return 0;
}

static void __exit privcmd_exit(void)
{
	misc_deregister(&privcmd_dev);
}

module_init(privcmd_init);
module_exit(privcmd_exit);