blob: 03c716a0f01f9d6b9c295dd8f31123660565223e [file] [log] [blame]
/*
 * gaccess.h - access guest memory
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 */
12
13#ifndef __KVM_S390_GACCESS_H
14#define __KVM_S390_GACCESS_H
15
16#include <linux/compiler.h>
17#include <linux/kvm_host.h>
18#include <asm/uaccess.h>
Christian Ehrhardt628eb9b2009-05-25 13:40:51 +020019#include "kvm-s390.h"
Heiko Carstensb0c632d2008-03-25 18:47:20 +010020
21static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
Martin Schwidefsky00963692008-07-25 15:51:00 +020022 unsigned long guestaddr)
Heiko Carstensb0c632d2008-03-25 18:47:20 +010023{
Martin Schwidefsky00963692008-07-25 15:51:00 +020024 unsigned long prefix = vcpu->arch.sie_block->prefix;
Christian Ehrhardt628eb9b2009-05-25 13:40:51 +020025 unsigned long origin = vcpu->arch.sie_block->gmsor;
26 unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +010027
28 if (guestaddr < 2 * PAGE_SIZE)
29 guestaddr += prefix;
30 else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))
31 guestaddr -= prefix;
32
33 if (guestaddr > memsize)
34 return (void __user __force *) ERR_PTR(-EFAULT);
35
36 guestaddr += origin;
37
38 return (void __user *) guestaddr;
39}
40
Martin Schwidefsky00963692008-07-25 15:51:00 +020041static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
Heiko Carstensb0c632d2008-03-25 18:47:20 +010042 u64 *result)
43{
44 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
45
46 BUG_ON(guestaddr & 7);
47
48 if (IS_ERR((void __force *) uptr))
49 return PTR_ERR((void __force *) uptr);
50
Martin Schwidefsky00963692008-07-25 15:51:00 +020051 return get_user(*result, (unsigned long __user *) uptr);
Heiko Carstensb0c632d2008-03-25 18:47:20 +010052}
53
Martin Schwidefsky00963692008-07-25 15:51:00 +020054static inline int get_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
Heiko Carstensb0c632d2008-03-25 18:47:20 +010055 u32 *result)
56{
57 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
58
59 BUG_ON(guestaddr & 3);
60
61 if (IS_ERR((void __force *) uptr))
62 return PTR_ERR((void __force *) uptr);
63
64 return get_user(*result, (u32 __user *) uptr);
65}
66
Martin Schwidefsky00963692008-07-25 15:51:00 +020067static inline int get_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
Heiko Carstensb0c632d2008-03-25 18:47:20 +010068 u16 *result)
69{
70 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
71
72 BUG_ON(guestaddr & 1);
73
74 if (IS_ERR(uptr))
75 return PTR_ERR(uptr);
76
77 return get_user(*result, (u16 __user *) uptr);
78}
79
Martin Schwidefsky00963692008-07-25 15:51:00 +020080static inline int get_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
Heiko Carstensb0c632d2008-03-25 18:47:20 +010081 u8 *result)
82{
83 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
84
85 if (IS_ERR((void __force *) uptr))
86 return PTR_ERR((void __force *) uptr);
87
88 return get_user(*result, (u8 __user *) uptr);
89}
90
Martin Schwidefsky00963692008-07-25 15:51:00 +020091static inline int put_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
Heiko Carstensb0c632d2008-03-25 18:47:20 +010092 u64 value)
93{
94 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
95
96 BUG_ON(guestaddr & 7);
97
98 if (IS_ERR((void __force *) uptr))
99 return PTR_ERR((void __force *) uptr);
100
101 return put_user(value, (u64 __user *) uptr);
102}
103
Martin Schwidefsky00963692008-07-25 15:51:00 +0200104static inline int put_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100105 u32 value)
106{
107 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
108
109 BUG_ON(guestaddr & 3);
110
111 if (IS_ERR((void __force *) uptr))
112 return PTR_ERR((void __force *) uptr);
113
114 return put_user(value, (u32 __user *) uptr);
115}
116
Martin Schwidefsky00963692008-07-25 15:51:00 +0200117static inline int put_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100118 u16 value)
119{
120 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
121
122 BUG_ON(guestaddr & 1);
123
124 if (IS_ERR((void __force *) uptr))
125 return PTR_ERR((void __force *) uptr);
126
127 return put_user(value, (u16 __user *) uptr);
128}
129
Martin Schwidefsky00963692008-07-25 15:51:00 +0200130static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100131 u8 value)
132{
133 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
134
135 if (IS_ERR((void __force *) uptr))
136 return PTR_ERR((void __force *) uptr);
137
138 return put_user(value, (u8 __user *) uptr);
139}
140
141
Martin Schwidefsky00963692008-07-25 15:51:00 +0200142static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
143 unsigned long guestdest,
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100144 const void *from, unsigned long n)
145{
146 int rc;
147 unsigned long i;
148 const u8 *data = from;
149
150 for (i = 0; i < n; i++) {
151 rc = put_guest_u8(vcpu, guestdest++, *(data++));
152 if (rc < 0)
153 return rc;
154 }
155 return 0;
156}
157
/*
 * copy_to_guest - copy a block of data into guest real memory.
 *
 * Fast path: when the whole range lies on one side of every prefixing
 * boundary, a single prefix adjustment is valid for the entire range and
 * one copy_to_user() suffices.  Slow path: if the range crosses any
 * boundary of the two swapped regions (absolute pages 0-1 or the two
 * pages at the prefix address), fall back to the byte-wise copy, which
 * translates every address individually.
 *
 * NOTE(review): on the fast path this returns copy_to_user()'s result —
 * the number of bytes NOT copied — rather than -EFAULT; callers appear
 * to treat any non-zero value as failure.  Confirm before relying on
 * the exact value.
 */
static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
				const void *from, unsigned long n)
{
	unsigned long prefix = vcpu->arch.sie_block->prefix;
	unsigned long origin = vcpu->arch.sie_block->gmsor;
	unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);

	/* range crosses the end of the low-core pages (0 .. 2*PAGE_SIZE) */
	if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
		goto slowpath;

	/* range crosses the start of the prefix area */
	if ((guestdest < prefix) && (guestdest + n > prefix))
		goto slowpath;

	/* range crosses the end of the prefix area */
	if ((guestdest < prefix + 2 * PAGE_SIZE)
	    && (guestdest + n > prefix + 2 * PAGE_SIZE))
		goto slowpath;

	/* no boundary crossed: one prefix swap covers the whole range */
	if (guestdest < 2 * PAGE_SIZE)
		guestdest += prefix;
	else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
		guestdest -= prefix;

	/* destination must end inside guest memory ... */
	if (guestdest + n > memsize)
		return -EFAULT;

	/* ... and guestdest + n must not wrap around */
	if (guestdest + n < guestdest)
		return -EFAULT;

	guestdest += origin;

	return copy_to_user((void __user *) guestdest, from, n);
slowpath:
	return __copy_to_guest_slow(vcpu, guestdest, from, n);
}
192
193static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
Martin Schwidefsky00963692008-07-25 15:51:00 +0200194 unsigned long guestsrc,
195 unsigned long n)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100196{
197 int rc;
198 unsigned long i;
199 u8 *data = to;
200
201 for (i = 0; i < n; i++) {
202 rc = get_guest_u8(vcpu, guestsrc++, data++);
203 if (rc < 0)
204 return rc;
205 }
206 return 0;
207}
208
/*
 * copy_from_guest - copy a block of data out of guest real memory.
 *
 * Mirror image of copy_to_guest(): use a single copy_from_user() when
 * the source range does not cross any prefixing boundary; otherwise
 * fall back to the byte-wise slow path, which translates each address
 * individually.
 *
 * NOTE(review): on the fast path this returns copy_from_user()'s result
 * (bytes not copied), not -EFAULT — see the matching note on
 * copy_to_guest(); confirm callers only test for non-zero.
 */
static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
				  unsigned long guestsrc, unsigned long n)
{
	unsigned long prefix = vcpu->arch.sie_block->prefix;
	unsigned long origin = vcpu->arch.sie_block->gmsor;
	unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);

	/* range crosses the end of the low-core pages (0 .. 2*PAGE_SIZE) */
	if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
		goto slowpath;

	/* range crosses the start of the prefix area */
	if ((guestsrc < prefix) && (guestsrc + n > prefix))
		goto slowpath;

	/* range crosses the end of the prefix area */
	if ((guestsrc < prefix + 2 * PAGE_SIZE)
	    && (guestsrc + n > prefix + 2 * PAGE_SIZE))
		goto slowpath;

	/* no boundary crossed: one prefix swap covers the whole range */
	if (guestsrc < 2 * PAGE_SIZE)
		guestsrc += prefix;
	else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
		guestsrc -= prefix;

	/* source must end inside guest memory ... */
	if (guestsrc + n > memsize)
		return -EFAULT;

	/* ... and guestsrc + n must not wrap around */
	if (guestsrc + n < guestsrc)
		return -EFAULT;

	guestsrc += origin;

	return copy_from_user(to, (void __user *) guestsrc, n);
slowpath:
	return __copy_from_guest_slow(vcpu, to, guestsrc, n);
}
243
Martin Schwidefsky00963692008-07-25 15:51:00 +0200244static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
245 unsigned long guestdest,
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100246 const void *from, unsigned long n)
247{
Christian Ehrhardt628eb9b2009-05-25 13:40:51 +0200248 unsigned long origin = vcpu->arch.sie_block->gmsor;
249 unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100250
251 if (guestdest + n > memsize)
252 return -EFAULT;
253
254 if (guestdest + n < guestdest)
255 return -EFAULT;
256
257 guestdest += origin;
258
259 return copy_to_user((void __user *) guestdest, from, n);
260}
261
262static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
Martin Schwidefsky00963692008-07-25 15:51:00 +0200263 unsigned long guestsrc,
264 unsigned long n)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100265{
Christian Ehrhardt628eb9b2009-05-25 13:40:51 +0200266 unsigned long origin = vcpu->arch.sie_block->gmsor;
267 unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100268
269 if (guestsrc + n > memsize)
270 return -EFAULT;
271
272 if (guestsrc + n < guestsrc)
273 return -EFAULT;
274
275 guestsrc += origin;
276
277 return copy_from_user(to, (void __user *) guestsrc, n);
278}
279#endif