/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <asm/ctl_reg.h>
#include <asm/io.h>

/*
 * This function writes to kernel memory bypassing DAT and possible
 * write protection. It copies one to four bytes from src to dst
 * using the stura instruction.
 * Returns the number of bytes copied or -EFAULT.
 */
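/*
 * Worked example of the mask computation below (illustrative values):
 * for (dst & 3) == 2 and size >= 2 we get count = 2 and
 * mask = ((0xf << (4 - 2)) & 0xf) >> 2 = 0x3, so the executed icm
 * replaces the two low-order bytes of the aligned word before stura
 * writes the word back to its real address.
 */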
static long probe_kernel_write_odd(void *dst, const void *src, size_t size)
{
	unsigned long count, aligned;
	int offset, mask;
	int rc = -EFAULT;

	aligned = (unsigned long) dst & ~3UL;
	offset = (unsigned long) dst & 3;
	count = min_t(unsigned long, 4 - offset, size);
	mask = (0xf << (4 - count)) & 0xf;
	mask >>= offset;
	asm volatile(
		"	bras	1,0f\n"		/* r1 = address of icm below */
		"	icm	0,0,0(%3)\n"	/* template, executed via ex */
		"0:	l	0,0(%1)\n"	/* load aligned word at dst */
		"	lra	%1,0(%1)\n"	/* virtual -> real address */
		"1:	ex	%2,0(1)\n"	/* icm with the computed mask */
		"2:	stura	0,%1\n"		/* store word to real address */
		"	la	%0,0\n"		/* rc = 0 */
		"3:\n"
		EX_TABLE(0b,3b) EX_TABLE(1b,3b) EX_TABLE(2b,3b)
		: "+d" (rc), "+a" (aligned)
		: "a" (mask), "a" (src) : "cc", "memory", "0", "1");
	return rc ? rc : count;
}

long probe_kernel_write(void *dst, const void *src, size_t size)
{
	long copied = 0;

	while (size) {
		copied = probe_kernel_write_odd(dst, src, size);
		if (copied < 0)
			break;
		dst += copied;
		src += copied;
		size -= copied;
	}
	return copied < 0 ? -EFAULT : 0;
}
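
/*
 * Minimal usage sketch: patch a word that may live in a write-protected
 * page. The target object and value are made up for illustration only.
 */
static __maybe_unused void probe_kernel_write_example(void)
{
	static int target;	/* stand-in for a write-protected object */
	int val = 1;

	if (probe_kernel_write(&target, &val, sizeof(val)))
		pr_warn("probe_kernel_write failed\n");
}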

static int __memcpy_real(void *dest, void *src, size_t count)
{
	register unsigned long _dest asm("2") = (unsigned long) dest;
	register unsigned long _len1 asm("3") = (unsigned long) count;
	register unsigned long _src  asm("4") = (unsigned long) src;
	register unsigned long _len2 asm("5") = (unsigned long) count;
	int rc = -EFAULT;

	asm volatile (
		"0:	mvcle	%1,%2,0x0\n"	/* copy (r4,r5) to (r2,r3) */
		"1:	jo	0b\n"		/* cc 3: not done, continue */
		"	lhi	%0,0x0\n"	/* rc = 0 */
		"2:\n"
		EX_TABLE(1b,2b)
		: "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
		  "+d" (_len2), "=m" (*((long *) dest))
		: "m" (*((long *) src))
		: "cc", "memory");
	return rc;
}

/*
 * Copy memory in real mode (kernel to kernel)
 */
int memcpy_real(void *dest, void *src, size_t count)
{
	unsigned long flags;
	int rc;

	if (!count)
		return 0;
	local_irq_save(flags);
	__arch_local_irq_stnsm(0xfbUL);	/* disable DAT */
	rc = __memcpy_real(dest, src, count);
	local_irq_restore(flags);
	return rc;
}
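
/*
 * Minimal usage sketch: fetch memory by real address, e.g. when
 * inspecting memory of a crashed previous kernel. The source address
 * is an arbitrary illustrative value; dest must be addressable with
 * DAT off, e.g. a directly mapped kernel buffer as used here.
 */
static __maybe_unused int memcpy_real_example(void)
{
	static char buf[256];
	unsigned long real_addr = 0x10000UL;	/* hypothetical real address */

	return memcpy_real(buf, (void *) real_addr, sizeof(buf));
}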

/*
 * Copy memory in absolute mode (kernel to kernel)
 */
void memcpy_absolute(void *dest, void *src, size_t count)
{
	unsigned long cr0, flags, prefix;

	flags = arch_local_irq_save();
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28);	/* disable lowcore protection */
	prefix = store_prefix();
	if (prefix) {
		local_mcck_disable();
		set_prefix(0);	/* real 0..8K now hits absolute memory */
		memcpy(dest, src, count);
		set_prefix(prefix);
		local_mcck_enable();
	} else {
		memcpy(dest, src, count);
	}
	__ctl_load(cr0, 0, 0);
	arch_local_irq_restore(flags);
}
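
/*
 * Minimal usage sketch: read the first word of the absolute zero page,
 * even if this CPU's prefix register relocates real address 0.
 */
static __maybe_unused unsigned long memcpy_absolute_example(void)
{
	unsigned long word;

	memcpy_absolute(&word, (void *) 0UL, sizeof(word));
	return word;
}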

/*
 * Copy memory from kernel (real) to user (virtual)
 */
int copy_to_user_real(void __user *dest, void *src, unsigned long count)
{
	int offs = 0, size, rc;
	char *buf;

	buf = (char *) __get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = -EFAULT;
	while (offs < count) {
		size = min(PAGE_SIZE, count - offs);
		if (memcpy_real(buf, src + offs, size))
			goto out;
		if (copy_to_user(dest + offs, buf, size))
			goto out;
		offs += size;
	}
	rc = 0;
out:
	free_page((unsigned long) buf);
	return rc;
}
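
/*
 * Minimal usage sketch: a read-style handler that hands real memory to
 * user space, as a dump interface might do. All names and the start
 * address are illustrative only.
 */
static __maybe_unused ssize_t example_read_real(char __user *ubuf,
						unsigned long real_addr,
						unsigned long count)
{
	if (copy_to_user_real((void __user *) ubuf, (void *) real_addr, count))
		return -EFAULT;
	return count;
}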

/*
 * Check if physical address is within prefix or zero page
 */
static int is_swapped(unsigned long addr)
{
	unsigned long lc;
	int cpu;

	if (addr < sizeof(struct _lowcore))
		return 1;
	for_each_online_cpu(cpu) {
		lc = (unsigned long) lowcore_ptr[cpu];
		if (addr > lc + sizeof(struct _lowcore) - 1 || addr < lc)
			continue;
		return 1;
	}
	return 0;
}

/*
 * Convert a physical pointer for /dev/mem access
 *
 * For swapped prefix pages a new buffer is returned that contains a copy of
 * the absolute memory. The buffer is at most one page large.
 */
void *xlate_dev_mem_ptr(unsigned long addr)
{
	void *bounce = (void *) addr;
	unsigned long size;

	get_online_cpus();
	preempt_disable();
	if (is_swapped(addr)) {
		size = PAGE_SIZE - (addr & ~PAGE_MASK);
		bounce = (void *) __get_free_page(GFP_ATOMIC);
		if (bounce)
			memcpy_absolute(bounce, (void *) addr, size);
	}
	preempt_enable();
	put_online_cpus();
	return bounce;
}

/*
 * Free converted buffer for /dev/mem access (if necessary)
 */
void unxlate_dev_mem_ptr(unsigned long addr, void *buf)
{
	if ((void *) addr != buf)
		free_page((unsigned long) buf);
}
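
/*
 * Minimal usage sketch showing how the two helpers pair up in a
 * /dev/mem-style read path (illustrative only; count is assumed to
 * stay within one page here):
 */
static __maybe_unused int example_dev_mem_read(char __user *ubuf,
					       unsigned long addr,
					       unsigned long count)
{
	void *ptr;
	int rc = 0;

	ptr = xlate_dev_mem_ptr(addr);	/* may return a bounce buffer */
	if (!ptr)
		return -ENOMEM;
	if (copy_to_user(ubuf, ptr, count))
		rc = -EFAULT;
	unxlate_dev_mem_ptr(addr, ptr);	/* frees the bounce buffer, if any */
	return rc;
}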