/*
 * access guest memory
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#ifndef __KVM_S390_GACCESS_H
#define __KVM_S390_GACCESS_H

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"

/* Convert real to absolute address by applying the prefix of the CPU */
static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
						 unsigned long gaddr)
{
	unsigned long prefix = vcpu->arch.sie_block->prefix;

	if (gaddr < 2 * PAGE_SIZE)
		gaddr += prefix;
	else if (gaddr >= prefix && gaddr < prefix + 2 * PAGE_SIZE)
		gaddr -= prefix;
	return gaddr;
}
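
/*
 * Illustrative sketch, not part of the original header: assuming a prefix
 * register value of 0x20000 and 4K pages, the lowcore swap above behaves
 * as follows:
 *
 *	kvm_s390_real_to_abs(vcpu, 0x1000)  returns 0x21000 (pages 0/1 map
 *						to the prefix area)
 *	kvm_s390_real_to_abs(vcpu, 0x21000) returns 0x1000  (the prefix area
 *						maps back to pages 0/1)
 *	kvm_s390_real_to_abs(vcpu, 0x50000) returns 0x50000 (all other
 *						addresses are unchanged)
 */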

/**
 * kvm_s390_logical_to_effective - convert guest logical to effective address
 * @vcpu: guest virtual cpu
 * @ga: guest logical address
 *
 * Convert a guest vcpu logical address to a guest vcpu effective address by
 * applying the rules of the vcpu's addressing mode defined by PSW bits 31
 * and 32 (extended/basic addressing mode).
 *
 * Depending on the vcpu's addressing mode the upper 40 bits (24 bit addressing
 * mode), 33 bits (31 bit addressing mode) or no bits (64 bit addressing mode)
 * of @ga will be zeroed and the remaining bits will be returned.
 */
static inline unsigned long kvm_s390_logical_to_effective(struct kvm_vcpu *vcpu,
							  unsigned long ga)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;

	if (psw_bits(*psw).eaba == PSW_AMODE_64BIT)
		return ga;
	if (psw_bits(*psw).eaba == PSW_AMODE_31BIT)
		return ga & ((1UL << 31) - 1);
	return ga & ((1UL << 24) - 1);
}
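
/*
 * Illustrative sketch, not part of the original header: with the guest PSW
 * in the 31-bit extended addressing mode, only the rightmost 31 bits of the
 * logical address survive:
 *
 *	kvm_s390_logical_to_effective(vcpu, 0x80001234UL) == 0x00001234UL
 *
 * In the 64-bit mode the address is returned unchanged; in the 24-bit mode
 * only the rightmost 24 bits are kept.
 */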

/*
 * Translate a guest pointer into a host userspace pointer by walking the
 * guest's gmap. If @prefixing is set, @gptr is treated as a guest real
 * address and lowcore prefixing is applied first. On translation failure
 * -EFAULT is returned, encoded as an error pointer.
 */
static inline void __user *__gptr_to_uptr(struct kvm_vcpu *vcpu,
					  void __user *gptr,
					  int prefixing)
{
	unsigned long gaddr = (unsigned long) gptr;
	unsigned long uaddr;

	if (prefixing)
		gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
	uaddr = gmap_fault(gaddr, vcpu->arch.gmap);
	if (IS_ERR_VALUE(uaddr))
		uaddr = -EFAULT;
	return (void __user *)uaddr;
}

#define get_guest(vcpu, x, gptr)				\
({								\
	__typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr, 1);\
	int __mask = sizeof(__typeof__(*(gptr))) - 1;		\
	int __ret;						\
								\
	if (IS_ERR((void __force *)__uptr)) {			\
		__ret = PTR_ERR((void __force *)__uptr);	\
	} else {						\
		BUG_ON((unsigned long)__uptr & __mask);		\
		__ret = get_user(x, __uptr);			\
	}							\
	__ret;							\
})

#define put_guest(vcpu, x, gptr)				\
({								\
	__typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr, 1);\
	int __mask = sizeof(__typeof__(*(gptr))) - 1;		\
	int __ret;						\
								\
	if (IS_ERR((void __force *)__uptr)) {			\
		__ret = PTR_ERR((void __force *)__uptr);	\
	} else {						\
		BUG_ON((unsigned long)__uptr & __mask);		\
		__ret = put_user(x, __uptr);			\
	}							\
	__ret;							\
})
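
/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * handler reading and rewriting a 64-bit word at a guest real address
 * ("ga" and "val" are made-up names):
 *
 *	u64 val;
 *	int rc;
 *
 *	rc = get_guest(vcpu, val, (u64 __user *)ga);
 *	if (!rc)
 *		rc = put_guest(vcpu, val + 1, (u64 __user *)ga);
 *	if (rc)
 *		return rc;
 *
 * Both macros apply prefixing (gptr is treated as a guest real address),
 * return -EFAULT if the address cannot be translated, and expect naturally
 * aligned accesses; a misaligned pointer hits the BUG_ON() above.
 */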

/*
 * Copy @len bytes between guest and host memory. The copy is split at guest
 * page boundaries so that every chunk can be translated separately through
 * __gptr_to_uptr().
 */
static inline int __copy_guest(struct kvm_vcpu *vcpu, unsigned long to,
			       unsigned long from, unsigned long len,
			       int to_guest, int prefixing)
{
	unsigned long _len, rc;
	void __user *uptr;

	while (len) {
		uptr = to_guest ? (void __user *)to : (void __user *)from;
		uptr = __gptr_to_uptr(vcpu, uptr, prefixing);
		if (IS_ERR((void __force *)uptr))
			return -EFAULT;
		_len = PAGE_SIZE - ((unsigned long)uptr & (PAGE_SIZE - 1));
		_len = min(_len, len);
		if (to_guest)
			rc = copy_to_user((void __user *) uptr, (void *)from, _len);
		else
			rc = copy_from_user((void *)to, (void __user *)uptr, _len);
		if (rc)
			return -EFAULT;
		len -= _len;
		from += _len;
		to += _len;
	}
	return 0;
}

#define copy_to_guest(vcpu, to, from, size) \
	__copy_guest(vcpu, to, (unsigned long)from, size, 1, 1)
#define copy_from_guest(vcpu, to, from, size) \
	__copy_guest(vcpu, (unsigned long)to, from, size, 0, 1)
#define copy_to_guest_absolute(vcpu, to, from, size) \
	__copy_guest(vcpu, to, (unsigned long)from, size, 1, 0)
#define copy_from_guest_absolute(vcpu, to, from, size) \
	__copy_guest(vcpu, (unsigned long)to, from, size, 0, 0)
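
/*
 * Illustrative sketch, not part of the original header: copying a buffer out
 * of guest memory ("buf" and "ga" are made-up names):
 *
 *	u8 buf[16];
 *
 *	if (copy_from_guest(vcpu, buf, ga, sizeof(buf)))
 *		return -EFAULT;
 *
 * The *_absolute variants skip the kvm_s390_real_to_abs() prefixing step and
 * therefore take guest absolute addresses; all four helpers handle copies
 * that cross page boundaries via the chunking in __copy_guest().
 */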

#endif /* __KVM_S390_GACCESS_H */