/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <asm/ctl_reg.h>

/*
 * This function writes to kernel memory bypassing DAT and possible
 * write protection. It copies one to four bytes from src to dst
 * using the stura instruction.
 * Returns the number of bytes copied or -EFAULT.
 */
static long probe_kernel_write_odd(void *dst, const void *src, size_t size)
{
	unsigned long count, aligned;
	int offset, mask;
	int rc = -EFAULT;

	aligned = (unsigned long) dst & ~3UL;
	offset = (unsigned long) dst & 3;
	count = min_t(unsigned long, 4 - offset, size);
	mask = (0xf << (4 - count)) & 0xf;
	mask >>= offset;
	asm volatile(
		"	bras	1,0f\n"		/* r1 = address of the icm */
		"	icm	0,0,0(%3)\n"	/* template, executed via ex */
		"0:	l	0,0(%1)\n"	/* load aligned word from dst */
		"	lra	%1,0(%1)\n"	/* translate to real address */
		"1:	ex	%2,0(1)\n"	/* icm: insert src bytes under mask */
		"2:	stura	0,%1\n"		/* store word using real address */
		"	la	%0,0\n"		/* success: rc = 0 */
		"3:\n"
		EX_TABLE(0b,3b) EX_TABLE(1b,3b) EX_TABLE(2b,3b)
		: "+d" (rc), "+a" (aligned)
		: "a" (mask), "a" (src) : "cc", "memory", "0", "1");
	return rc ? rc : count;
}
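
/*
 * Worked example: for a dst with (dst & 3) == 2 and size >= 2:
 *
 *	offset = 2
 *	count  = min(4 - 2, size) = 2
 *	mask   = (0xf << (4 - 2)) & 0xf = 0xc,  then  0xc >> 2 = 0x3
 *
 * An icm mask of 0x3 selects the two low-order bytes of r0, i.e.
 * bytes 2-3 of the aligned word, so only the addressed bytes are
 * replaced before stura writes the word back. probe_kernel_write()
 * below advances dst/src by the returned count and continues on a
 * 4-byte boundary.
 */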

long probe_kernel_write(void *dst, const void *src, size_t size)
{
	long copied = 0;

	while (size) {
		copied = probe_kernel_write_odd(dst, src, size);
		if (copied < 0)
			break;
		dst += copied;
		src += copied;
		size -= copied;
	}
	return copied < 0 ? -EFAULT : 0;
}
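
/*
 * A minimal usage sketch (destination and payload are hypothetical).
 * The return convention matches the generic probe_kernel_write():
 * 0 on success, -EFAULT if any write faulted:
 *
 *	u16 nop = 0x0700;	// bcr 0,0 -- made-up example payload
 *
 *	if (probe_kernel_write(text_addr, &nop, sizeof(nop)))
 *		pr_warn("code patching failed\n");
 *
 * Because stura bypasses DAT and write protection, this also works
 * on read-only mapped kernel text, which is what e.g. the s390
 * kprobes code relies on.
 */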

static int __memcpy_real(void *dest, void *src, size_t count)
{
	register unsigned long _dest asm("2") = (unsigned long) dest;
	register unsigned long _len1 asm("3") = (unsigned long) count;
	register unsigned long _src asm("4") = (unsigned long) src;
	register unsigned long _len2 asm("5") = (unsigned long) count;
	int rc = -EFAULT;

	asm volatile (
		"0:	mvcle	%1,%2,0x0\n"	/* copy dest/src register pairs */
		"1:	jo	0b\n"		/* cc == 3: copy incomplete, retry */
		"	lhi	%0,0x0\n"	/* success: rc = 0 */
		"2:\n"
		EX_TABLE(1b,2b)
		: "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
		  "+d" (_len2), "=m" (*((long *) dest))
		: "m" (*((long *) src))
		: "cc", "memory");
	return rc;
}

/*
 * Copy memory in real mode (kernel to kernel)
 */
int memcpy_real(void *dest, void *src, size_t count)
{
	unsigned long flags;
	int rc;

	if (!count)
		return 0;
	local_irq_save(flags);
	__arch_local_irq_stnsm(0xfbUL);	/* switch off DAT */
	rc = __memcpy_real(dest, src, count);
	local_irq_restore(flags);	/* restores the DAT bit as well */
	return rc;
}
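
/*
 * A hedged usage sketch (address and length are made up): read a
 * chunk of memory that is not mapped in the kernel address space,
 * e.g. when a dump tool inspects the memory of the previous kernel:
 *
 *	unsigned long word;
 *
 *	if (memcpy_real(&word, (void *) 0x11b8UL, sizeof(word)))
 *		return -EFAULT;
 *
 * Interrupts (and DAT) are off for the whole copy, so callers should
 * copy in reasonably small chunks.
 */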

/*
 * Copy memory in absolute mode (kernel to kernel)
 */
void memcpy_absolute(void *dest, void *src, size_t count)
{
	unsigned long cr0, flags, prefix;

	flags = arch_local_irq_save();
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28);	/* disable lowcore protection */
	prefix = store_prefix();
	if (prefix) {
		local_mcck_disable();
		set_prefix(0);
		memcpy(dest, src, count);
		set_prefix(prefix);
		local_mcck_enable();
	} else {
		memcpy(dest, src, count);
	}
	__ctl_load(cr0, 0, 0);
	arch_local_irq_restore(flags);
}
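
/*
 * While the prefix register is zero, low addresses reach absolute
 * storage directly, so the memcpy() above hits the absolute zero page
 * even though lowcore accesses are normally redirected by prefixing.
 * A hedged usage sketch (the destination offset and payload are made
 * up; dest must be an absolute address):
 *
 *	unsigned long val = 0x12345678UL;
 *
 *	memcpy_absolute((void *) 0x120UL, &val, sizeof(val));
 *
 * Machine checks are disabled around the copy because the prefix area
 * also holds the machine-check save areas.
 */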

/*
 * Copy memory from kernel (real) to user (virtual)
 */
int copy_to_user_real(void __user *dest, void *src, size_t count)
{
	int offs = 0, size, rc;
	char *buf;

	buf = (char *) __get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = -EFAULT;
	while (offs < count) {
		size = min(PAGE_SIZE, count - offs);
		if (memcpy_real(buf, src + offs, size))
			goto out;
		if (copy_to_user(dest + offs, buf, size))
			goto out;
		offs += size;
	}
	rc = 0;
out:
	free_page((unsigned long) buf);
	return rc;
}

/*
 * Copy memory from user (virtual) to kernel (real)
 */
int copy_from_user_real(void *dest, void __user *src, size_t count)
{
	int offs = 0, size, rc;
	char *buf;

	buf = (char *) __get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = -EFAULT;
	while (offs < count) {
		size = min(PAGE_SIZE, count - offs);
		if (copy_from_user(buf, src + offs, size))
			goto out;
		if (memcpy_real(dest + offs, buf, size))
			goto out;
		offs += size;
	}
	rc = 0;
out:
	free_page((unsigned long) buf);
	return rc;
}
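
/*
 * Both helpers use the same bounce-page pattern: memcpy_real() cannot
 * access user memory (DAT is off while it runs), and copy_to_user()/
 * copy_from_user() cannot access real memory, so a kernel page bridges
 * the two, one page per iteration. A hedged caller sketch (buffer,
 * offset and length are made up):
 *
 *	if (copy_to_user_real(ubuf, (void *) mem_offset, len))
 *		return -EFAULT;
 */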

/*
 * Check if a physical address is within the zero page or the prefix
 * area of any online CPU
 */
static int is_swapped(unsigned long addr)
{
	unsigned long lc;
	int cpu;

	if (addr < sizeof(struct _lowcore))
		return 1;
	for_each_online_cpu(cpu) {
		lc = (unsigned long) lowcore_ptr[cpu];
		if (addr > lc + sizeof(struct _lowcore) - 1 || addr < lc)
			continue;
		return 1;
	}
	return 0;
}
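
/*
 * Rationale: the prefix register swaps the first sizeof(struct _lowcore)
 * bytes of absolute storage with the CPU's prefix area, so for those
 * ranges a physical address seen through /dev/mem does not contain the
 * data the caller expects. is_swapped() flags exactly those addresses
 * so that xlate_dev_mem_ptr() below can read them in absolute mode.
 */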

/*
 * Convert a physical pointer for /dev/mem access
 *
 * For swapped prefix pages a bounce buffer is returned that contains
 * a copy of the absolute memory. The buffer is at most one page in
 * size.
 */
void *xlate_dev_mem_ptr(unsigned long addr)
{
	void *bounce = (void *) addr;
	unsigned long size;

	get_online_cpus();
	preempt_disable();
	if (is_swapped(addr)) {
		size = PAGE_SIZE - (addr & ~PAGE_MASK);
		bounce = (void *) __get_free_page(GFP_ATOMIC);
		if (bounce)
			memcpy_absolute(bounce, (void *) addr, size);
	}
	preempt_enable();
	put_online_cpus();
	return bounce;
}

/*
 * Free converted buffer for /dev/mem access (if necessary)
 */
void unxlate_dev_mem_ptr(unsigned long addr, void *buf)
{
	if ((void *) addr != buf)
		free_page((unsigned long) buf);
}
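
/*
 * The two functions are meant to be used as a pair, roughly as the
 * /dev/mem read path does (a hedged sketch, error handling trimmed;
 * user_buf, p and sz are made-up caller state):
 *
 *	void *ptr = xlate_dev_mem_ptr(p);
 *
 *	if (!ptr)
 *		return -EFAULT;
 *	if (copy_to_user(user_buf, ptr, sz))
 *		rc = -EFAULT;
 *	unxlate_dev_mem_ptr(p, ptr);
 *
 * unxlate_dev_mem_ptr() frees the bounce page only when one was
 * actually allocated, i.e. when buf differs from the original address.
 */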