/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <asm/ctl_reg.h>
#include <asm/io.h>

static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
{
	unsigned long count, aligned;
	int offset, mask;
	int rc = -EFAULT;

	/*
	 * Build an "icm" byte mask that covers count bytes starting at
	 * offset within the 4-byte aligned destination word.
	 */
	aligned = (unsigned long) dst & ~3UL;
	offset = (unsigned long) dst & 3;
	count = min_t(unsigned long, 4 - offset, size);
	mask = (0xf << (4 - count)) & 0xf;
	mask >>= offset;
	/*
	 * Load the aligned word, splice in the masked bytes from src by
	 * executing the "icm" via "ex", and store the result back with
	 * "stura", which takes the real address computed by "lra" and
	 * bypasses DAT write protection.
	 */
	asm volatile(
		"	bras	1,0f\n"
		"	icm	0,0,0(%3)\n"
		"0:	l	0,0(%1)\n"
		"	lra	%1,0(%1)\n"
		"1:	ex	%2,0(1)\n"
		"2:	stura	0,%1\n"
		"	la	%0,0\n"
		"3:\n"
		EX_TABLE(0b,3b) EX_TABLE(1b,3b) EX_TABLE(2b,3b)
		: "+d" (rc), "+a" (aligned)
		: "a" (mask), "a" (src) : "cc", "memory", "0", "1");
	return rc ? rc : count;
}

/*
 * s390_kernel_write - write to kernel memory bypassing DAT
 * @dst: destination address
 * @src: source address
 * @size: number of bytes to copy
 *
 * This function writes to kernel memory bypassing DAT and possible page table
 * write protection. It writes to the destination using the stura instruction.
 * Therefore we have a read-modify-write sequence: the function reads four
 * bytes from the destination at a four byte boundary, modifies the bytes
 * requested and writes the result back in a loop.
 *
 * Note: this means that this function may not be called concurrently on
 *	 several cpus with overlapping words, since this may potentially
 *	 cause data corruption.
 */
void notrace s390_kernel_write(void *dst, const void *src, size_t size)
{
	long copied = 0;

	while (size) {
		copied = s390_kernel_write_odd(dst, src, size);
		if (copied < 0)
			break;
		dst += copied;
		src += copied;
		size -= copied;
	}
}
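
/*
 * Illustrative sketch (hypothetical helper, not used anywhere): how a
 * caller such as the instruction patching code could use
 * s390_kernel_write() to overwrite write-protected kernel text with a
 * 2-byte nop (bcr 0,0).
 */
static void __maybe_unused example_patch_nop(void *text_addr)
{
	static const unsigned char nop[2] = { 0x07, 0x00 };

	/* Callers must serialize writes to overlapping words. */
	s390_kernel_write(text_addr, nop, sizeof(nop));
}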

static int __memcpy_real(void *dest, void *src, size_t count)
{
	/* mvcle operates on the even/odd register pairs 2/3 and 4/5 */
	register unsigned long _dest asm("2") = (unsigned long) dest;
	register unsigned long _len1 asm("3") = (unsigned long) count;
	register unsigned long _src asm("4") = (unsigned long) src;
	register unsigned long _len2 asm("5") = (unsigned long) count;
	int rc = -EFAULT;

	asm volatile (
		"0: mvcle %1,%2,0x0\n"
		"1: jo 0b\n"	/* CC 3: not finished, continue */
		" lhi %0,0x0\n"
		"2:\n"
		EX_TABLE(1b,2b)
		: "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
		  "+d" (_len2), "=m" (*((long *) dest))
		: "m" (*((long *) src))
		: "cc", "memory");
	return rc;
}

/*
 * Copy memory in real mode (kernel to kernel)
 */
int memcpy_real(void *dest, void *src, size_t count)
{
	unsigned long flags;
	int rc;

	if (!count)
		return 0;
	local_irq_save(flags);
	__arch_local_irq_stnsm(0xfbUL);	/* switch DAT off in the PSW */
	rc = __memcpy_real(dest, src, count);
	local_irq_restore(flags);
	return rc;
}
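
/*
 * Illustrative sketch (hypothetical helper): the dump code uses
 * memcpy_real() like this to read memory that is not mapped in the
 * current address space, e.g. pages of the pre-kdump kernel.
 */
static int __maybe_unused example_read_oldmem(void *buf, unsigned long paddr, size_t len)
{
	/* DAT is switched off, so paddr is interpreted as a real address */
	return memcpy_real(buf, (void *) paddr, len);
}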

/*
 * Copy memory in absolute mode (kernel to kernel)
 */
void memcpy_absolute(void *dest, void *src, size_t count)
{
	unsigned long cr0, flags, prefix;

	flags = arch_local_irq_save();
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28);	/* disable lowcore protection */
	prefix = store_prefix();
	if (prefix) {
		/*
		 * With a non-zero prefix, accesses to 0..8K are forwarded
		 * to the prefix area; set the prefix to zero so the copy
		 * sees absolute memory.
		 */
		local_mcck_disable();
		set_prefix(0);
		memcpy(dest, src, count);
		set_prefix(prefix);
		local_mcck_enable();
	} else {
		memcpy(dest, src, count);
	}
	__ctl_load(cr0, 0, 0);
	arch_local_irq_restore(flags);
}
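
/*
 * Illustrative sketch (hypothetical helper): store a value at a given
 * offset in absolute lowcore, no matter which CPU's prefix area
 * currently overlays absolute address zero.
 */
static void __maybe_unused example_set_abs_word(unsigned long offset, unsigned long val)
{
	memcpy_absolute((void *) offset, &val, sizeof(val));
}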

/*
 * Copy memory from kernel (real) to user (virtual)
 */
int copy_to_user_real(void __user *dest, void *src, unsigned long count)
{
	int offs = 0, size, rc;
	char *buf;

	buf = (char *) __get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = -EFAULT;
	/*
	 * Bounce through a kernel page: memcpy_real() runs with DAT off
	 * and therefore cannot store to user space directly.
	 */
	while (offs < count) {
		size = min(PAGE_SIZE, count - offs);
		if (memcpy_real(buf, src + offs, size))
			goto out;
		if (copy_to_user(dest + offs, buf, size))
			goto out;
		offs += size;
	}
	rc = 0;
out:
	free_page((unsigned long) buf);
	return rc;
}
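
/*
 * Illustrative sketch (hypothetical helper): a dump read() handler
 * could expose unmapped (real) memory to user space this way.
 */
static ssize_t __maybe_unused example_dump_read(char __user *ubuf, unsigned long src, size_t len)
{
	if (copy_to_user_real((void __user *) ubuf, (void *) src, len))
		return -EFAULT;
	return len;
}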

/*
 * Check if physical address is within prefix or zero page. Due to CPU
 * prefixing these pages are "swapped", so a direct access would not
 * return the expected absolute memory contents.
 */
static int is_swapped(unsigned long addr)
{
	unsigned long lc;
	int cpu;

	if (addr < sizeof(struct _lowcore))
		return 1;
	for_each_online_cpu(cpu) {
		lc = (unsigned long) lowcore_ptr[cpu];
		if (addr > lc + sizeof(struct _lowcore) - 1 || addr < lc)
			continue;
		return 1;
	}
	return 0;
}

/*
 * Convert a physical pointer for /dev/mem access
 *
 * For swapped prefix pages a new buffer is returned that contains a copy of
 * the absolute memory. The buffer size is at most one page.
 */
void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	void *bounce = (void *) addr;
	unsigned long size;

	get_online_cpus();
	preempt_disable();
	if (is_swapped(addr)) {
		size = PAGE_SIZE - (addr & ~PAGE_MASK);
		bounce = (void *) __get_free_page(GFP_ATOMIC);
		if (bounce)
			memcpy_absolute(bounce, (void *) addr, size);
	}
	preempt_enable();
	put_online_cpus();
	return bounce;
}

/*
 * Free converted buffer for /dev/mem access (if necessary)
 */
void unxlate_dev_mem_ptr(phys_addr_t addr, void *buf)
{
	if ((void *) addr != buf)
		free_page((unsigned long) buf);
}
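
/*
 * Illustrative sketch (hypothetical helper): how the /dev/mem read path
 * pairs the two functions above. A bounce copy is made only if @addr
 * hits a prefix or zero page; unxlate_dev_mem_ptr() frees it again.
 */
static void __maybe_unused example_dev_mem_access(phys_addr_t addr)
{
	void *ptr = xlate_dev_mem_ptr(addr);

	if (ptr) {
		/* ... read at most one page through ptr here ... */
		unxlate_dev_mem_ptr(addr, ptr);
	}
}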