/*
 * gaccess.h -  access guest memory
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#ifndef __KVM_S390_GACCESS_H
#define __KVM_S390_GACCESS_H

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/uaccess.h>

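/*
 * Convert a guest real address into a user-space pointer into the guest
 * memory region: addresses in the first two pages are redirected to the
 * prefix area (and vice versa), the result is bounds-checked against the
 * guest memory size and then offset by the guest origin.
 */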
static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
					       unsigned long guestaddr)
{
	unsigned long prefix  = vcpu->arch.sie_block->prefix;
	unsigned long origin  = vcpu->kvm->arch.guest_origin;
	unsigned long memsize = vcpu->kvm->arch.guest_memsize;

	if (guestaddr < 2 * PAGE_SIZE)
		guestaddr += prefix;
	else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))
		guestaddr -= prefix;

	if (guestaddr >= memsize)
		return (void __user __force *) ERR_PTR(-EFAULT);

	guestaddr += origin;

	return (void __user *) guestaddr;
}

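/*
 * Accessors for 8/16/32/64-bit values at a guest real address. The address
 * must be naturally aligned (misalignment triggers BUG()); they return 0 on
 * success, -EFAULT if the address lies outside guest memory, or the return
 * value of get_user()/put_user().
 */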
static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u64 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 7);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u64 __user *) uptr);
}

static inline int get_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u32 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 3);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u32 __user *) uptr);
}

static inline int get_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u16 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 1);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u16 __user *) uptr);
}

static inline int get_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
			       u8 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u8 __user *) uptr);
}

static inline int put_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u64 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 7);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u64 __user *) uptr);
}

static inline int put_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u32 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 3);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u32 __user *) uptr);
}

static inline int put_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u16 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 1);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u16 __user *) uptr);
}

static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
			       u8 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u8 __user *) uptr);
}

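/*
 * Byte-wise fallback used by copy_to_guest() when the destination range
 * crosses a prefix boundary and therefore cannot be translated as one
 * contiguous block.
 */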
static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
				       unsigned long guestdest,
				       const void *from, unsigned long n)
{
	int rc;
	unsigned long i;
	const u8 *data = from;

	for (i = 0; i < n; i++) {
		rc = put_guest_u8(vcpu, guestdest++, *(data++));
		if (rc < 0)
			return rc;
	}
	return 0;
}

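/*
 * Copy @n bytes from the host buffer @from to guest real address
 * @guestdest. Ranges that cross a prefix boundary take the byte-wise
 * slow path; everything else is done with a single copy_to_user()
 * into the guest memory region.
 */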
static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
				const void *from, unsigned long n)
{
	unsigned long prefix  = vcpu->arch.sie_block->prefix;
	unsigned long origin  = vcpu->kvm->arch.guest_origin;
	unsigned long memsize = vcpu->kvm->arch.guest_memsize;

	if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
		goto slowpath;

	if ((guestdest < prefix) && (guestdest + n > prefix))
		goto slowpath;

	if ((guestdest < prefix + 2 * PAGE_SIZE)
	    && (guestdest + n > prefix + 2 * PAGE_SIZE))
		goto slowpath;

	if (guestdest < 2 * PAGE_SIZE)
		guestdest += prefix;
	else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
		guestdest -= prefix;

	if (guestdest + n > memsize)
		return -EFAULT;

	if (guestdest + n < guestdest)
		return -EFAULT;

	guestdest += origin;

	return copy_to_user((void __user *) guestdest, from, n);
slowpath:
	return __copy_to_guest_slow(vcpu, guestdest, from, n);
}

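/*
 * Byte-wise fallback used by copy_from_guest() when the source range
 * crosses a prefix boundary.
 */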
static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
					 unsigned long guestsrc,
					 unsigned long n)
{
	int rc;
	unsigned long i;
	u8 *data = to;

	for (i = 0; i < n; i++) {
		rc = get_guest_u8(vcpu, guestsrc++, data++);
		if (rc < 0)
			return rc;
	}
	return 0;
}

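/*
 * Copy @n bytes from guest real address @guestsrc to the host buffer
 * @to, with the same prefix handling and slow-path rules as
 * copy_to_guest().
 */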
static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
				  unsigned long guestsrc, unsigned long n)
{
	unsigned long prefix  = vcpu->arch.sie_block->prefix;
	unsigned long origin  = vcpu->kvm->arch.guest_origin;
	unsigned long memsize = vcpu->kvm->arch.guest_memsize;

	if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
		goto slowpath;

	if ((guestsrc < prefix) && (guestsrc + n > prefix))
		goto slowpath;

	if ((guestsrc < prefix + 2 * PAGE_SIZE)
	    && (guestsrc + n > prefix + 2 * PAGE_SIZE))
		goto slowpath;

	if (guestsrc < 2 * PAGE_SIZE)
		guestsrc += prefix;
	else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
		guestsrc -= prefix;

	if (guestsrc + n > memsize)
		return -EFAULT;

	if (guestsrc + n < guestsrc)
		return -EFAULT;

	guestsrc += origin;

	return copy_from_user(to, (void __user *) guestsrc, n);
slowpath:
	return __copy_from_guest_slow(vcpu, to, guestsrc, n);
}

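/*
 * Copy @n bytes from @from to guest absolute address @guestdest. No
 * prefixing is applied; the range is only bounds-checked against the
 * guest memory size.
 */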
static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
					 unsigned long guestdest,
					 const void *from, unsigned long n)
{
	unsigned long origin  = vcpu->kvm->arch.guest_origin;
	unsigned long memsize = vcpu->kvm->arch.guest_memsize;

	if (guestdest + n > memsize)
		return -EFAULT;

	if (guestdest + n < guestdest)
		return -EFAULT;

	guestdest += origin;

	return copy_to_user((void __user *) guestdest, from, n);
}

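/*
 * Copy @n bytes from guest absolute address @guestsrc to @to. No
 * prefixing is applied.
 */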
static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
					   unsigned long guestsrc,
					   unsigned long n)
{
	unsigned long origin  = vcpu->kvm->arch.guest_origin;
	unsigned long memsize = vcpu->kvm->arch.guest_memsize;

	if (guestsrc + n > memsize)
		return -EFAULT;

	if (guestsrc + n < guestsrc)
		return -EFAULT;

	guestsrc += origin;

	return copy_from_user(to, (void __user *) guestsrc, n);
}
#endif