/******************************************************************************
 * privcmd.c
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/balloon.h>

#include "privcmd.h"

MODULE_LICENSE("GPL");

#define PRIV_VMA_LOCKED ((void *)1)

static int privcmd_vma_range_is_mapped(
	       struct vm_area_struct *vma,
	       unsigned long addr,
	       unsigned long nr_pages);

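/*
 * IOCTL_PRIVCMD_HYPERCALL: pass an arbitrary hypercall from a
 * sufficiently privileged user-space process straight to the
 * hypervisor.
 *
 * A minimal user-space sketch, not part of this file; it assumes the
 * uapi definitions from <xen/privcmd.h> and the hypercall numbers from
 * the Xen interface headers:
 *
 *	int fd = open("/dev/xen/privcmd", O_RDWR);
 *	struct privcmd_hypercall call = {
 *		.op  = __HYPERVISOR_xen_version,
 *		.arg = { XENVER_version },
 *	};
 *	long xen_version = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
 */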
static long privcmd_ioctl_hypercall(void __user *udata)
{
	struct privcmd_hypercall hypercall;
	long ret;

	if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
		return -EFAULT;

	xen_preemptible_hcall_begin();
	ret = privcmd_call(hypercall.op,
			   hypercall.arg[0], hypercall.arg[1],
			   hypercall.arg[2], hypercall.arg[3],
			   hypercall.arg[4]);
	xen_preemptible_hcall_end();

	return ret;
}

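/* Free every page on the list and leave the list head empty for reuse. */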
static void free_page_list(struct list_head *pages)
{
	struct page *p, *n;

	list_for_each_entry_safe(p, n, pages, lru)
		__free_page(p);

	INIT_LIST_HEAD(pages);
}

/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
			unsigned nelem, size_t size,
			const void __user *data)
{
	unsigned pageidx;
	void *pagedata;
	int ret;

	if (size > PAGE_SIZE)
		return 0;

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* quiet, gcc */
	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page = alloc_page(GFP_KERNEL);

			ret = -ENOMEM;
			if (page == NULL)
				goto fail;

			pagedata = page_address(page);

			list_add_tail(&page->lru, pagelist);
			pageidx = 0;
		}

		ret = -EFAULT;
		if (copy_from_user(pagedata + pageidx, data, size))
			goto fail;

		data += size;
		pageidx += size;
	}

	ret = 0;

fail:
	return ret;
}

/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages. The elements must be packed as by
 * gather_array(), i.e. an element never straddles a page boundary.
 */
static int traverse_pages(unsigned nelem, size_t size,
			  struct list_head *pos,
			  int (*fn)(void *data, void *state),
			  void *state)
{
	void *pagedata;
	unsigned pageidx;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* hush, gcc */

	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page;
			pos = pos->next;
			page = list_entry(pos, struct page, lru);
			pagedata = page_address(page);
			pageidx = 0;
		}

		ret = (*fn)(pagedata + pageidx, state);
		if (ret)
			break;
		pageidx += size;
	}

	return ret;
}

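/* Per-call state threaded through traverse_pages() by privcmd_ioctl_mmap(). */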
struct mmap_mfn_state {
	unsigned long va;
	struct vm_area_struct *vma;
	domid_t domain;
};

static int mmap_mfn_range(void *data, void *state)
{
	struct privcmd_mmap_entry *msg = data;
	struct mmap_mfn_state *st = state;
	struct vm_area_struct *vma = st->vma;
	int rc;

	/* Do not allow range to wrap the address space. */
	if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
	    ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
		return -EINVAL;

	/* Range chunks must be contiguous in va space. */
	if ((msg->va != st->va) ||
	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
		return -EINVAL;

	rc = xen_remap_domain_mfn_range(vma,
					msg->va & PAGE_MASK,
					msg->mfn, msg->npages,
					vma->vm_page_prot,
					st->domain, NULL);
	if (rc < 0)
		return rc;

	st->va += msg->npages << PAGE_SHIFT;

	return 0;
}

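/*
 * IOCTL_PRIVCMD_MMAP: map one or more contiguous ranges of foreign
 * machine frames into a VMA obtained by mmap()ing the privcmd device.
 * The chunks must start at the VMA's base address and be contiguous in
 * virtual address space.
 */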
static long privcmd_ioctl_mmap(void __user *udata)
{
	struct privcmd_mmap mmapcmd;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;
	LIST_HEAD(pagelist);
	struct mmap_mfn_state state;

	/* We only support privcmd_ioctl_mmap_batch for auto translated. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -ENOSYS;

	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
		return -EFAULT;

	rc = gather_array(&pagelist,
			  mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			  mmapcmd.entry);

	if (rc || list_empty(&pagelist))
		goto out;

	down_write(&mm->mmap_sem);

	{
		struct page *page = list_first_entry(&pagelist,
						     struct page, lru);
		struct privcmd_mmap_entry *msg = page_address(page);

		vma = find_vma(mm, msg->va);
		rc = -EINVAL;

		if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
			goto out_up;
		vma->vm_private_data = PRIV_VMA_LOCKED;
	}

	state.va = vma->vm_start;
	state.vma = vma;
	state.domain = mmapcmd.dom;

	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			    &pagelist,
			    mmap_mfn_range, &state);

out_up:
	up_write(&mm->mmap_sem);

out:
	free_page_list(&pagelist);

	return rc;
}

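/*
 * State shared by the two traverse_pages() passes of
 * privcmd_ioctl_mmap_batch(): mmap_batch_fn() and mmap_return_errors().
 */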
struct mmap_batch_state {
	domid_t domain;
	unsigned long va;
	struct vm_area_struct *vma;
	int index;
	/* A tristate:
	 *      0 for no errors
	 *      1 if at least one error has happened (and no
	 *          -ENOENT errors have happened)
	 *      -ENOENT if at least 1 -ENOENT has happened.
	 */
	int global_error;
	int version;

	/* User-space mfn array to store errors in the second pass for V1. */
	xen_pfn_t __user *user_mfn;
	/* User-space int array to store errors in the second pass for V2. */
	int __user *user_err;
};

/* Auto-translated dom0 note: if the domU being created is PV, then the
 * mfn is a real machine frame (an address on the bus). If it is
 * auto-translated, the mfn is a pfn (the input to HAP).
 */
static int mmap_batch_fn(void *data, void *state)
{
	xen_pfn_t *mfnp = data;
	struct mmap_batch_state *st = state;
	struct vm_area_struct *vma = st->vma;
	struct page **pages = vma->vm_private_data;
	struct page *cur_page = NULL;
	int ret;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		cur_page = pages[st->index++];

	ret = xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
					 st->vma->vm_page_prot, st->domain,
					 &cur_page);

	/* Store error code for second pass. */
	if (st->version == 1) {
		if (ret < 0) {
			/*
			 * V1 encodes the error codes in the top nibble of the
			 * 32-bit mfn (with the known limitations this implies
			 * for 64-bit callers).
			 */
			*mfnp |= (ret == -ENOENT) ?
						PRIVCMD_MMAPBATCH_PAGED_ERROR :
						PRIVCMD_MMAPBATCH_MFN_ERROR;
		}
	} else { /* st->version == 2 */
		*((int *) mfnp) = ret;
	}

	/* And see if it affects the global_error. */
	if (ret < 0) {
		if (ret == -ENOENT)
			st->global_error = -ENOENT;
		else {
			/* Record that at least one error has happened. */
			if (st->global_error == 0)
				st->global_error = 1;
		}
	}
	st->va += PAGE_SIZE;

	return 0;
}

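/*
 * Second pass over the gathered array: copy each per-frame error back
 * to user space (into the mfn array itself for V1, into m.err for V2).
 */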
static int mmap_return_errors(void *data, void *state)
{
	struct mmap_batch_state *st = state;

	if (st->version == 1) {
		xen_pfn_t mfnp = *((xen_pfn_t *) data);
		if (mfnp & PRIVCMD_MMAPBATCH_MFN_ERROR)
			return __put_user(mfnp, st->user_mfn++);
		else
			st->user_mfn++;
	} else { /* st->version == 2 */
		int err = *((int *) data);
		if (err)
			return __put_user(err, st->user_err++);
		else
			st->user_err++;
	}

	return 0;
}

/* Allocate local pfns (ballooned pages) that are then mapped with gmfns
 * from the foreign domid. Record the page array in the vma for later
 * use.
 * Returns: 0 on success, otherwise -errno.
 */
static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
{
	int rc;
	struct page **pages;

	pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
	if (pages == NULL)
		return -ENOMEM;

	rc = alloc_xenballooned_pages(numpgs, pages, 0);
	if (rc != 0) {
		pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
			numpgs, rc);
		kfree(pages);
		return -ENOMEM;
	}
	BUG_ON(vma->vm_private_data != NULL);
	vma->vm_private_data = pages;

	return 0;
}

static struct vm_operations_struct privcmd_vm_ops;

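/*
 * IOCTL_PRIVCMD_MMAPBATCH(_V2): map an arbitrary set of foreign frames,
 * one frame per page of the VMA range, with per-frame error reporting
 * in the V2 flavour.
 *
 * A minimal user-space sketch for V2, not part of this file; it assumes
 * the uapi layout of struct privcmd_mmapbatch_v2 from <xen/privcmd.h>,
 * a PAGE_SIZE definition, and caller-supplied gfns[]/errs[] arrays of
 * nr entries:
 *
 *	void *addr = mmap(NULL, nr * PAGE_SIZE, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *	struct privcmd_mmapbatch_v2 batch = {
 *		.num  = nr,
 *		.dom  = domid,
 *		.addr = (__u64)(uintptr_t)addr,
 *		.arr  = gfns,
 *		.err  = errs,
 *	};
 *	if (ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH_V2, &batch) < 0 &&
 *	    errno == ENOENT)
 *		;	// retry frames whose errs[i] == -ENOENT
 */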
static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
{
	int ret;
	struct privcmd_mmapbatch_v2 m;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long nr_pages;
	LIST_HEAD(pagelist);
	struct mmap_batch_state state;

	switch (version) {
	case 1:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
			return -EFAULT;
		/* Returns per-frame error in m.arr. */
		m.err = NULL;
		if (!access_ok(VERIFY_WRITE, m.arr, m.num * sizeof(*m.arr)))
			return -EFAULT;
		break;
	case 2:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
			return -EFAULT;
		/* Returns per-frame error code in m.err. */
		if (!access_ok(VERIFY_WRITE, m.err, m.num * (sizeof(*m.err))))
			return -EFAULT;
		break;
	default:
		return -EINVAL;
	}

	nr_pages = m.num;
	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
		return -EINVAL;

	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);

	if (ret)
		goto out;
	if (list_empty(&pagelist)) {
		ret = -EINVAL;
		goto out;
	}

	if (version == 2) {
		/* Zero error array now to only copy back actual errors. */
		if (clear_user(m.err, sizeof(int) * m.num)) {
			ret = -EFAULT;
			goto out;
		}
	}

	down_write(&mm->mmap_sem);

	vma = find_vma(mm, m.addr);
	if (!vma ||
	    vma->vm_ops != &privcmd_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Caller must either:
	 *
	 * Map the whole VMA range, which will also allocate all the
	 * pages required for the auto_translated_physmap case.
	 *
	 * Or
	 *
	 * Map unmapped holes left from a previous map attempt (e.g.,
	 * because those foreign frames were previously paged out).
	 */
	if (vma->vm_private_data == NULL) {
		if (m.addr != vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (xen_feature(XENFEAT_auto_translated_physmap)) {
			ret = alloc_empty_pages(vma, m.num);
			if (ret < 0)
				goto out_unlock;
		} else
			vma->vm_private_data = PRIV_VMA_LOCKED;
	} else {
		if (m.addr < vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	state.domain        = m.dom;
	state.vma           = vma;
	state.va            = m.addr;
	state.index         = 0;
	state.global_error  = 0;
	state.version       = version;

	/* mmap_batch_fn guarantees ret == 0 */
	BUG_ON(traverse_pages(m.num, sizeof(xen_pfn_t),
			      &pagelist, mmap_batch_fn, &state));

	up_write(&mm->mmap_sem);

	if (state.global_error) {
		/* Write back errors in second pass. */
		state.user_mfn = (xen_pfn_t *)m.arr;
		state.user_err = m.err;
		ret = traverse_pages(m.num, sizeof(xen_pfn_t),
				     &pagelist, mmap_return_errors, &state);
	} else
		ret = 0;

	/* If we have not had any EFAULT-like global errors then set the global
	 * error to -ENOENT if necessary. */
	if ((ret == 0) && (state.global_error == -ENOENT))
		ret = -ENOENT;

out:
	free_page_list(&pagelist);
	return ret;

out_unlock:
	up_write(&mm->mmap_sem);
	goto out;
}

static long privcmd_ioctl(struct file *file,
			  unsigned int cmd, unsigned long data)
{
	int ret = -ENOSYS;
	void __user *udata = (void __user *) data;

	switch (cmd) {
	case IOCTL_PRIVCMD_HYPERCALL:
		ret = privcmd_ioctl_hypercall(udata);
		break;

	case IOCTL_PRIVCMD_MMAP:
		ret = privcmd_ioctl_mmap(udata);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH:
		ret = privcmd_ioctl_mmap_batch(udata, 1);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH_V2:
		ret = privcmd_ioctl_mmap_batch(udata, 2);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

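/*
 * For auto-translated guests the VMA is backed by ballooned pages;
 * unmap the foreign frames and give the pages back to the balloon when
 * the VMA goes away.
 */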
static void privcmd_close(struct vm_area_struct *vma)
{
	struct page **pages = vma->vm_private_data;
	int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	int rc;

	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
		return;

	rc = xen_unmap_domain_mfn_range(vma, numpgs, pages);
	if (rc == 0)
		free_xenballooned_pages(numpgs, pages);
	else
		pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
			numpgs, rc);
	kfree(pages);
}

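/*
 * Mappings are only ever set up via the ioctls above, so a fault means
 * user space touched an address in the VMA that was never successfully
 * mapped; report SIGBUS.
 */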
static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
	       vma, vma->vm_start, vma->vm_end,
	       vmf->pgoff, vmf->virtual_address);

	return VM_FAULT_SIGBUS;
}

static struct vm_operations_struct privcmd_vm_ops = {
	.close = privcmd_close,
	.fault = privcmd_fault
};

static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
	 * how to recreate these mappings */
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
			 VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &privcmd_vm_ops;
	vma->vm_private_data = NULL;

	return 0;
}

/*
 * For MMAPBATCH*. This allows asserting the singleshot mapping
 * on a per pfn/pte basis. Mapping calls that fail with ENOENT
 * can then be retried until success.
 */
static int is_mapped_fn(pte_t *pte, struct page *pmd_page,
			unsigned long addr, void *data)
{
	return pte_none(*pte) ? 0 : -EBUSY;
}

static int privcmd_vma_range_is_mapped(
	           struct vm_area_struct *vma,
	           unsigned long addr,
	           unsigned long nr_pages)
{
	return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
				   is_mapped_fn, NULL) != 0;
}

const struct file_operations xen_privcmd_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = privcmd_ioctl,
	.mmap = privcmd_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmd_fops);

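/*
 * Registered as a misc device named "xen/privcmd"; udev normally
 * exposes it as /dev/xen/privcmd.
 */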
static struct miscdevice privcmd_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/privcmd",
	.fops = &xen_privcmd_fops,
};

static int __init privcmd_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	err = misc_register(&privcmd_dev);
	if (err != 0) {
		pr_err("Could not register Xen privcmd device\n");
		return err;
	}
	return 0;
}

static void __exit privcmd_exit(void)
{
	misc_deregister(&privcmd_dev);
}

module_init(privcmd_init);
module_exit(privcmd_exit);