xen/privcmd: Further s/MFN/GFN/ clean-up
The privcmd code mixes the usage of GFN and MFN within the same
functions, which makes the code difficult to understand when you only
work with auto-translated guests.
The privcmd driver only deals with GFNs, so replace all mentions of
MFN with GFN.
The ioctl structure used to map foreign pages has been left unchanged,
given that userspace is using it. Nonetheless, add a comment to
explain the expected value within the "mfn" field.
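To illustrate the last point, the comment on the "mfn" field would look
roughly like the sketch below (illustrative only; the exact wording and
hunk in include/uapi/xen/privcmd.h may differ and is not part of the
diff shown here):

    struct privcmd_mmap_entry {
            __u64 va;
            /*
             * This field holds a GFN. The name "mfn" is kept only
             * because the structure is exposed to user space as part
             * of the ABI.
             */
            __u64 mfn;
            __u64 npages;
    };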
Signed-off-by: Julien Grall <julien.grall@citrix.com>
Reviewed-by: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 5a29616..c6deb87 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -193,16 +193,16 @@
return ret;
}
-struct mmap_mfn_state {
+struct mmap_gfn_state {
unsigned long va;
struct vm_area_struct *vma;
domid_t domain;
};
-static int mmap_mfn_range(void *data, void *state)
+static int mmap_gfn_range(void *data, void *state)
{
struct privcmd_mmap_entry *msg = data;
- struct mmap_mfn_state *st = state;
+ struct mmap_gfn_state *st = state;
struct vm_area_struct *vma = st->vma;
int rc;
@@ -216,7 +216,7 @@
((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
return -EINVAL;
- rc = xen_remap_domain_mfn_range(vma,
+ rc = xen_remap_domain_gfn_range(vma,
msg->va & PAGE_MASK,
msg->mfn, msg->npages,
vma->vm_page_prot,
@@ -236,7 +236,7 @@
struct vm_area_struct *vma;
int rc;
LIST_HEAD(pagelist);
- struct mmap_mfn_state state;
+ struct mmap_gfn_state state;
/* We only support privcmd_ioctl_mmap_batch for auto translated. */
if (xen_feature(XENFEAT_auto_translated_physmap))
@@ -273,7 +273,7 @@
rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
&pagelist,
- mmap_mfn_range, &state);
+ mmap_gfn_range, &state);
out_up:
@@ -299,18 +299,18 @@
int global_error;
int version;
- /* User-space mfn array to store errors in the second pass for V1. */
- xen_pfn_t __user *user_mfn;
+ /* User-space gfn array to store errors in the second pass for V1. */
+ xen_pfn_t __user *user_gfn;
/* User-space int array to store errors in the second pass for V2. */
int __user *user_err;
};
-/* auto translated dom0 note: if domU being created is PV, then mfn is
- * mfn(addr on bus). If it's auto xlated, then mfn is pfn (input to HAP).
+/* auto translated dom0 note: if domU being created is PV, then gfn is
+ * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP).
*/
static int mmap_batch_fn(void *data, int nr, void *state)
{
- xen_pfn_t *mfnp = data;
+ xen_pfn_t *gfnp = data;
struct mmap_batch_state *st = state;
struct vm_area_struct *vma = st->vma;
struct page **pages = vma->vm_private_data;
@@ -321,8 +321,8 @@
cur_pages = &pages[st->index];
BUG_ON(nr < 0);
- ret = xen_remap_domain_mfn_array(st->vma, st->va & PAGE_MASK, mfnp, nr,
- (int *)mfnp, st->vma->vm_page_prot,
+ ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
+ (int *)gfnp, st->vma->vm_page_prot,
st->domain, cur_pages);
/* Adjust the global_error? */
@@ -347,22 +347,22 @@
if (st->version == 1) {
if (err) {
- xen_pfn_t mfn;
+ xen_pfn_t gfn;
- ret = get_user(mfn, st->user_mfn);
+ ret = get_user(gfn, st->user_gfn);
if (ret < 0)
return ret;
/*
* V1 encodes the error codes in the 32bit top
- * nibble of the mfn (with its known
+ * nibble of the gfn (with its known
* limitations vis-a-vis 64 bit callers).
*/
- mfn |= (err == -ENOENT) ?
+ gfn |= (err == -ENOENT) ?
PRIVCMD_MMAPBATCH_PAGED_ERROR :
PRIVCMD_MMAPBATCH_MFN_ERROR;
- return __put_user(mfn, st->user_mfn++);
+ return __put_user(gfn, st->user_gfn++);
} else
- st->user_mfn++;
+ st->user_gfn++;
} else { /* st->version == 2 */
if (err)
return __put_user(err, st->user_err++);
@@ -388,7 +388,7 @@
return 0;
}
-/* Allocate pfns that are then mapped with gmfns from foreign domid. Update
+/* Allocate pfns that are then mapped with gfns from foreign domid. Update
* the vma with the page info to use later.
* Returns: 0 if success, otherwise -errno
*/
@@ -526,7 +526,7 @@
if (state.global_error) {
/* Write back errors in second pass. */
- state.user_mfn = (xen_pfn_t *)m.arr;
+ state.user_gfn = (xen_pfn_t *)m.arr;
state.user_err = m.err;
ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
&pagelist, mmap_return_errors, &state);
@@ -587,7 +587,7 @@
if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
return;
- rc = xen_unmap_domain_mfn_range(vma, numpgs, pages);
+ rc = xen_unmap_domain_gfn_range(vma, numpgs, pages);
if (rc == 0)
free_xenballooned_pages(numpgs, pages);
else
diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c
index 58a5389..cff2387 100644
--- a/drivers/xen/xlate_mmu.c
+++ b/drivers/xen/xlate_mmu.c
@@ -38,8 +38,8 @@
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
-/* map fgmfn of domid to lpfn in the current domain */
-static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
+/* map fgfn of domid to lpfn in the current domain */
+static int map_foreign_page(unsigned long lpfn, unsigned long fgfn,
unsigned int domid)
{
int rc;
@@ -49,7 +49,7 @@
.size = 1,
.space = XENMAPSPACE_gmfn_foreign,
};
- xen_ulong_t idx = fgmfn;
+ xen_ulong_t idx = fgfn;
xen_pfn_t gpfn = lpfn;
int err = 0;
@@ -62,13 +62,13 @@
}
struct remap_data {
- xen_pfn_t *fgmfn; /* foreign domain's gmfn */
+ xen_pfn_t *fgfn; /* foreign domain's gfn */
pgprot_t prot;
domid_t domid;
struct vm_area_struct *vma;
int index;
struct page **pages;
- struct xen_remap_mfn_info *info;
+ struct xen_remap_gfn_info *info;
int *err_ptr;
int mapped;
};
@@ -82,20 +82,20 @@
pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot));
int rc;
- rc = map_foreign_page(pfn, *info->fgmfn, info->domid);
+ rc = map_foreign_page(pfn, *info->fgfn, info->domid);
*info->err_ptr++ = rc;
if (!rc) {
set_pte_at(info->vma->vm_mm, addr, ptep, pte);
info->mapped++;
}
- info->fgmfn++;
+ info->fgfn++;
return 0;
}
int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
unsigned long addr,
- xen_pfn_t *mfn, int nr,
+ xen_pfn_t *gfn, int nr,
int *err_ptr, pgprot_t prot,
unsigned domid,
struct page **pages)
@@ -108,7 +108,7 @@
x86 PVOPS */
BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
- data.fgmfn = mfn;
+ data.fgfn = gfn;
data.prot = prot;
data.domid = domid;
data.vma = vma;