/*
 * gaccess.h - access guest memory
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#ifndef __KVM_S390_GACCESS_H
#define __KVM_S390_GACCESS_H

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/uaccess.h>

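/*
 * Translate a guest real address into a user space pointer within the
 * memory area backing this guest.  The first two pages (the guest
 * lowcore) and the prefix pages are swapped according to the vcpu's
 * prefix register; addresses beyond the guest memory size yield an
 * ERR_PTR(-EFAULT) encoded pointer.
 */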
static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
					       u64 guestaddr)
{
	u64 prefix = vcpu->arch.sie_block->prefix;
	u64 origin = vcpu->kvm->arch.guest_origin;
	u64 memsize = vcpu->kvm->arch.guest_memsize;

	if (guestaddr < 2 * PAGE_SIZE)
		guestaddr += prefix;
	else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))
		guestaddr -= prefix;

	if (guestaddr > memsize)
		return (void __user __force *) ERR_PTR(-EFAULT);

	guestaddr += origin;

	return (void __user *) guestaddr;
}

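/*
 * Read a single value from guest memory.  The BUG_ON checks enforce
 * the natural alignment required for each access width (byte reads
 * need none); the value is stored through *result and 0 or a negative
 * error code is returned.
 */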
static inline int get_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
				u64 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 7);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u64 __user *) uptr);
}

static inline int get_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
				u32 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 3);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u32 __user *) uptr);
}

static inline int get_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
				u16 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 1);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u16 __user *) uptr);
}

static inline int get_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
			       u8 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u8 __user *) uptr);
}

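/*
 * Write a single value to guest memory.  As with the read accessors,
 * natural alignment is enforced with BUG_ON (byte stores excepted) and
 * a negative error code is returned on translation or fault errors.
 */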
static inline int put_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
				u64 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 7);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u64 __user *) uptr);
}

static inline int put_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
				u32 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 3);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u32 __user *) uptr);
}

static inline int put_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
				u16 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 1);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u16 __user *) uptr);
}

static inline int put_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
			       u8 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u8 __user *) uptr);
}

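/*
 * Byte-wise fallback used when a copy crosses the lowcore or prefix
 * page boundaries, so that every byte gets the prefix handling of
 * put_guest_u8().
 */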
static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, u64 guestdest,
				       const void *from, unsigned long n)
{
	int rc;
	unsigned long i;
	const u8 *data = from;

	for (i = 0; i < n; i++) {
		rc = put_guest_u8(vcpu, guestdest++, *(data++));
		if (rc < 0)
			return rc;
	}
	return 0;
}

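/*
 * Copy a kernel buffer into guest memory at a guest real address.
 * Copies that straddle the lowcore or the prefix pages fall back to
 * the byte-wise slow path; everything else is translated once and
 * handed to copy_to_user().
 */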
static inline int copy_to_guest(struct kvm_vcpu *vcpu, u64 guestdest,
				const void *from, unsigned long n)
{
	u64 prefix = vcpu->arch.sie_block->prefix;
	u64 origin = vcpu->kvm->arch.guest_origin;
	u64 memsize = vcpu->kvm->arch.guest_memsize;

	if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
		goto slowpath;

	if ((guestdest < prefix) && (guestdest + n > prefix))
		goto slowpath;

	if ((guestdest < prefix + 2 * PAGE_SIZE)
	    && (guestdest + n > prefix + 2 * PAGE_SIZE))
		goto slowpath;

	if (guestdest < 2 * PAGE_SIZE)
		guestdest += prefix;
	else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
		guestdest -= prefix;

	if (guestdest + n > memsize)
		return -EFAULT;

	if (guestdest + n < guestdest)
		return -EFAULT;

	guestdest += origin;

	return copy_to_user((void __user *) guestdest, from, n);
slowpath:
	return __copy_to_guest_slow(vcpu, guestdest, from, n);
}

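/*
 * Byte-wise fallback for reads that cross the lowcore or prefix page
 * boundaries, mirroring __copy_to_guest_slow().
 */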
static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
					 u64 guestsrc, unsigned long n)
{
	int rc;
	unsigned long i;
	u8 *data = to;

	for (i = 0; i < n; i++) {
		rc = get_guest_u8(vcpu, guestsrc++, data++);
		if (rc < 0)
			return rc;
	}
	return 0;
}

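/*
 * Copy from a guest real address into a kernel buffer.  Reads that
 * straddle the lowcore or the prefix pages take the byte-wise slow
 * path; everything else is translated once and handed to
 * copy_from_user().
 */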
static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
				  u64 guestsrc, unsigned long n)
{
	u64 prefix = vcpu->arch.sie_block->prefix;
	u64 origin = vcpu->kvm->arch.guest_origin;
	u64 memsize = vcpu->kvm->arch.guest_memsize;

	if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
		goto slowpath;

	if ((guestsrc < prefix) && (guestsrc + n > prefix))
		goto slowpath;

	if ((guestsrc < prefix + 2 * PAGE_SIZE)
	    && (guestsrc + n > prefix + 2 * PAGE_SIZE))
		goto slowpath;

	if (guestsrc < 2 * PAGE_SIZE)
		guestsrc += prefix;
	else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
		guestsrc -= prefix;

	if (guestsrc + n > memsize)
		return -EFAULT;

	if (guestsrc + n < guestsrc)
		return -EFAULT;

	guestsrc += origin;

	return copy_from_user(to, (void __user *) guestsrc, n);
slowpath:
	return __copy_from_guest_slow(vcpu, to, guestsrc, n);
}

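/*
 * Copy a kernel buffer to a guest absolute address, i.e. without
 * applying the vcpu's prefix.  Only the guest memory size and address
 * wrap-around are checked.
 */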
static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu, u64 guestdest,
					 const void *from, unsigned long n)
{
	u64 origin = vcpu->kvm->arch.guest_origin;
	u64 memsize = vcpu->kvm->arch.guest_memsize;

	if (guestdest + n > memsize)
		return -EFAULT;

	if (guestdest + n < guestdest)
		return -EFAULT;

	guestdest += origin;

	return copy_to_user((void __user *) guestdest, from, n);
}

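/*
 * Copy from a guest absolute address into a kernel buffer, again
 * without prefix handling.
 */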
static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
					   u64 guestsrc, unsigned long n)
{
	u64 origin = vcpu->kvm->arch.guest_origin;
	u64 memsize = vcpu->kvm->arch.guest_memsize;

	if (guestsrc + n > memsize)
		return -EFAULT;

	if (guestsrc + n < guestsrc)
		return -EFAULT;

	guestsrc += origin;

	return copy_from_user(to, (void __user *) guestsrc, n);
}
#endif