/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 */

#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <asm/ctl_reg.h>

/*
 * This function writes to kernel memory bypassing DAT and possible
 * write protection. It copies one to four bytes from src to dst
 * using the stura instruction.
 * Returns the number of bytes copied or -EFAULT.
 */
static long probe_kernel_write_odd(void *dst, const void *src, size_t size)
{
	unsigned long count, aligned;
	int offset, mask;
	int rc = -EFAULT;

	aligned = (unsigned long) dst & ~3UL;
	offset = (unsigned long) dst & 3;
	count = min_t(unsigned long, 4 - offset, size);
	mask = (0xf << (4 - count)) & 0xf;
	mask >>= offset;
	asm volatile(
		"	bras	1,0f\n"
		"	icm	0,0,0(%3)\n"
		"0:	l	0,0(%1)\n"
		"	lra	%1,0(%1)\n"
		"1:	ex	%2,0(1)\n"
		"2:	stura	0,%1\n"
		"	la	%0,0\n"
		"3:\n"
		EX_TABLE(0b,3b) EX_TABLE(1b,3b) EX_TABLE(2b,3b)
		: "+d" (rc), "+a" (aligned)
		: "a" (mask), "a" (src) : "cc", "memory", "0", "1");
	return rc ? rc : count;
}

long probe_kernel_write(void *dst, const void *src, size_t size)
{
	long copied = 0;

	while (size) {
		copied = probe_kernel_write_odd(dst, src, size);
		if (copied < 0)
			break;
		dst += copied;
		src += copied;
		size -= copied;
	}
	return copied < 0 ? -EFAULT : 0;
}
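
#if 0
/*
 * Usage sketch (illustrative only, assuming the interface above): patch a
 * single byte of kernel text, as instruction patching code might do.
 * probe_kernel_write() returns 0 on success and -EFAULT if the destination
 * could not be written; the names used here are hypothetical.
 */
static int patch_kernel_byte(void *addr, unsigned char insn)
{
	return probe_kernel_write(addr, &insn, 1);
}
#endif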

/*
 * Low level helper for memcpy_real(): copy memory with the mvcle
 * instruction. The caller has already disabled DAT and interrupts.
 * Returns 0 on success, -EFAULT if the copy faults.
 */
static int __memcpy_real(void *dest, void *src, size_t count)
{
	register unsigned long _dest asm("2") = (unsigned long) dest;
	register unsigned long _len1 asm("3") = (unsigned long) count;
	register unsigned long _src asm("4") = (unsigned long) src;
	register unsigned long _len2 asm("5") = (unsigned long) count;
	int rc = -EFAULT;

	asm volatile (
		"0:	mvcle	%1,%2,0x0\n"
		"1:	jo	0b\n"
		"	lhi	%0,0x0\n"
		"2:\n"
		EX_TABLE(1b,2b)
		: "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
		  "+d" (_len2), "=m" (*((long *) dest))
		: "m" (*((long *) src))
		: "cc", "memory");
	return rc;
}

/*
 * Copy memory in real mode (kernel to kernel)
 */
int memcpy_real(void *dest, void *src, size_t count)
{
	unsigned long flags;
	int rc;

	if (!count)
		return 0;
	local_irq_save(flags);
	__arch_local_irq_stnsm(0xfbUL);	/* disable DAT */
	rc = __memcpy_real(dest, src, count);
	local_irq_restore(flags);
	return rc;
}
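
#if 0
/*
 * Usage sketch (illustrative only): read a page of memory via its real
 * address, e.g. memory that is not mapped in the kernel address space.
 * memcpy_real() returns 0 on success and -EFAULT otherwise; "real_addr"
 * and "page_buf" are hypothetical names.
 */
static int read_real_page(unsigned long real_addr, void *page_buf)
{
	return memcpy_real(page_buf, (void *) real_addr, PAGE_SIZE);
}
#endif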

/*
 * Copy memory to the absolute zero page, i.e. the lowcore at absolute
 * address 0, with lowcore protection temporarily disabled.
 */
void copy_to_absolute_zero(void *dest, void *src, size_t count)
{
	unsigned long cr0;

	BUG_ON((unsigned long) dest + count >= sizeof(struct _lowcore));
	preempt_disable();
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28);	/* disable lowcore protection */
	memcpy_real(dest + store_prefix(), src, count);
	__ctl_load(cr0, 0, 0);
	preempt_enable();
}
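
#if 0
/*
 * Usage sketch (illustrative only): mirror a value into the absolute zero
 * page so that code running without prefixing can find it.
 * "ABS_ZERO_OFFSET" is a hypothetical offset below sizeof(struct _lowcore),
 * not a real lowcore member.
 */
#define ABS_ZERO_OFFSET	0x1200UL

static void publish_magic(unsigned long magic)
{
	copy_to_absolute_zero((void *) ABS_ZERO_OFFSET, &magic, sizeof(magic));
}
#endif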

/*
 * Copy memory from kernel (real) to user (virtual)
 */
int copy_to_user_real(void __user *dest, void *src, size_t count)
{
	int offs = 0, size, rc;
	char *buf;

	buf = (char *) __get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = -EFAULT;
	while (offs < count) {
		size = min(PAGE_SIZE, count - offs);
		if (memcpy_real(buf, src + offs, size))
			goto out;
		if (copy_to_user(dest + offs, buf, size))
			goto out;
		offs += size;
	}
	rc = 0;
out:
	free_page((unsigned long) buf);
	return rc;
}
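
#if 0
/*
 * Usage sketch (illustrative only): a read() handler for a dump interface
 * might use copy_to_user_real() to hand memory that is only reachable via
 * real addresses directly to a user buffer. The names are hypothetical.
 */
static ssize_t dump_read(char __user *ubuf, unsigned long real_addr,
			 size_t len)
{
	if (copy_to_user_real(ubuf, (void *) real_addr, len))
		return -EFAULT;
	return len;
}
#endif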

/*
 * Copy memory from user (virtual) to kernel (real)
 */
int copy_from_user_real(void *dest, void __user *src, size_t count)
{
	int offs = 0, size, rc;
	char *buf;

	buf = (char *) __get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = -EFAULT;
	while (offs < count) {
		size = min(PAGE_SIZE, count - offs);
		if (copy_from_user(buf, src + offs, size))
			goto out;
		if (memcpy_real(dest + offs, buf, size))
			goto out;
		offs += size;
	}
	rc = 0;
out:
	free_page((unsigned long) buf);
	return rc;
}
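
#if 0
/*
 * Usage sketch (illustrative only): the inverse direction, filling a
 * real-addressed buffer from user space, e.g. when a user supplied blob
 * has to be placed at a fixed real address. The names are hypothetical.
 */
static int load_blob(unsigned long real_addr, void __user *ubuf, size_t len)
{
	return copy_from_user_real((void *) real_addr, ubuf, len);
}
#endif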