[PATCH] s390: improved machine check handling

Improved machine check handling.  The kernel is now able to receive machine
checks while running in kernel mode (system call, interrupt and program check
handling).  In addition, register validation is now performed.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
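
As an illustration only (not part of the patch), here is a minimal sketch of
how a caller could use the new local_mcck_disable()/local_mcck_enable()
helpers from <asm-s390/system.h> to hold off machine checks around a short
critical section; the surrounding function and its body are hypothetical:

#include <asm/system.h>

static void fragile_update(void)
{
	local_mcck_disable();	/* clear PSW_MASK_MCHECK in the psw mask */
	/* ... code that must not be interrupted by a machine check ... */
	local_mcck_enable();	/* back to PSW_KERNEL_BITS, mchecks allowed */
}

Note that both helpers go through __set_psw_mask(), so the first byte of the
psw mask (the I/O and external interrupt state picked up by
local_save_flags) is preserved across the call.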
diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h
index df5172f..76b5b19 100644
--- a/include/asm-s390/lowcore.h
+++ b/include/asm-s390/lowcore.h
@@ -109,10 +109,14 @@
 
 #ifndef __s390x__
 #define __LC_PFAULT_INTPARM             0x080
+#define __LC_CPU_TIMER_SAVE_AREA        0x0D8
 #define __LC_AREGS_SAVE_AREA            0x120
+#define __LC_GPREGS_SAVE_AREA           0x180
 #define __LC_CREGS_SAVE_AREA            0x1C0
 #else /* __s390x__ */
 #define __LC_PFAULT_INTPARM             0x11B8
+#define __LC_GPREGS_SAVE_AREA           0x1280
+#define __LC_CPU_TIMER_SAVE_AREA        0x1328
 #define __LC_AREGS_SAVE_AREA            0x1340
 #define __LC_CREGS_SAVE_AREA            0x1380
 #endif /* __s390x__ */
@@ -167,7 +171,8 @@
 	__u16        subchannel_nr;            /* 0x0ba */
 	__u32        io_int_parm;              /* 0x0bc */
 	__u32        io_int_word;              /* 0x0c0 */
-        __u8         pad3[0xD8-0xC4];          /* 0x0c4 */
+        __u8         pad3[0xD4-0xC4];          /* 0x0c4 */
+	__u32        extended_save_area_addr;  /* 0x0d4 */
 	__u32        cpu_timer_save_area[2];   /* 0x0d8 */
 	__u32        clock_comp_save_area[2];  /* 0x0e0 */
 	__u32        mcck_interruption_code[2]; /* 0x0e8 */
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
index fb46e90..8bd14de 100644
--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -207,6 +207,18 @@
 #endif /* __s390x__ */
 
 /*
+ * Set PSW to specified value.
+ */
+static inline void __load_psw(psw_t psw)
+{
+#ifndef __s390x__
+	asm volatile ("lpsw  0(%0)" : : "a" (&psw), "m" (psw) : "cc" );
+#else
+	asm volatile ("lpswe 0(%0)" : : "a" (&psw), "m" (psw) : "cc" );
+#endif
+}
+
+/*
  * Set PSW mask to specified value, while leaving the
  * PSW addr pointing to the next instruction.
  */
@@ -214,8 +226,8 @@
 static inline void __load_psw_mask (unsigned long mask)
 {
 	unsigned long addr;
-
 	psw_t psw;
+
 	psw.mask = mask;
 
 #ifndef __s390x__
@@ -241,30 +253,8 @@
  */
 static inline void enabled_wait(void)
 {
-	unsigned long reg;
-	psw_t wait_psw;
-
-	wait_psw.mask = PSW_BASE_BITS | PSW_MASK_IO | PSW_MASK_EXT |
-		PSW_MASK_MCHECK | PSW_MASK_WAIT | PSW_DEFAULT_KEY;
-#ifndef __s390x__
-	asm volatile (
-		"    basr %0,0\n"
-		"0:  la   %0,1f-0b(%0)\n"
-		"    st   %0,4(%1)\n"
-		"    oi   4(%1),0x80\n"
-		"    lpsw 0(%1)\n"
-		"1:"
-		: "=&a" (reg) : "a" (&wait_psw), "m" (wait_psw)
-		: "memory", "cc" );
-#else /* __s390x__ */
-	asm volatile (
-		"    larl  %0,0f\n"
-		"    stg   %0,8(%1)\n"
-		"    lpswe 0(%1)\n"
-		"0:"
-		: "=&a" (reg) : "a" (&wait_psw), "m" (wait_psw)
-		: "memory", "cc" );
-#endif /* __s390x__ */
+	__load_psw_mask(PSW_BASE_BITS | PSW_MASK_IO | PSW_MASK_EXT |
+			PSW_MASK_MCHECK | PSW_MASK_WAIT | PSW_DEFAULT_KEY);
 }
 
 /*
@@ -273,13 +263,11 @@
 
 static inline void disabled_wait(unsigned long code)
 {
-        char psw_buffer[2*sizeof(psw_t)];
         unsigned long ctl_buf;
-        psw_t *dw_psw = (psw_t *)(((unsigned long) &psw_buffer+sizeof(psw_t)-1)
-                                  & -sizeof(psw_t));
+        psw_t dw_psw;
 
-        dw_psw->mask = PSW_BASE_BITS | PSW_MASK_WAIT;
-        dw_psw->addr = code;
+        dw_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT;
+        dw_psw.addr = code;
         /* 
          * Store status and then load disabled wait psw,
          * the processor is dead afterwards
@@ -301,7 +289,7 @@
                       "    oi    0x1c0,0x10\n" /* fake protection bit */
                       "    lpsw 0(%1)"
                       : "=m" (ctl_buf)
-		      : "a" (dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc" );
+		      : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc" );
 #else /* __s390x__ */
         asm volatile ("    stctg 0,0,0(%2)\n"
                       "    ni    4(%2),0xef\n" /* switch off protection */
@@ -333,7 +321,7 @@
                       "    oi    0x384(1),0x10\n" /* fake protection bit */
                       "    lpswe 0(%1)"
                       : "=m" (ctl_buf)
-		      : "a" (dw_psw), "a" (&ctl_buf),
+		      : "a" (&dw_psw), "a" (&ctl_buf),
 		        "m" (dw_psw) : "cc", "0", "1");
 #endif /* __s390x__ */
 }
diff --git a/include/asm-s390/ptrace.h b/include/asm-s390/ptrace.h
index 4eff8f2..fc7c96e 100644
--- a/include/asm-s390/ptrace.h
+++ b/include/asm-s390/ptrace.h
@@ -276,7 +276,7 @@
 #endif /* __s390x__ */
 
 #define PSW_KERNEL_BITS	(PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY | \
-			 PSW_DEFAULT_KEY)
+			 PSW_MASK_MCHECK | PSW_DEFAULT_KEY)
 #define PSW_USER_BITS	(PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME | \
 			 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | \
 			 PSW_MASK_PSTATE | PSW_DEFAULT_KEY)
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
index 81514d7..e3cb3ce 100644
--- a/include/asm-s390/system.h
+++ b/include/asm-s390/system.h
@@ -16,6 +16,7 @@
 #include <asm/types.h>
 #include <asm/ptrace.h>
 #include <asm/setup.h>
+#include <asm/processor.h>
 
 #ifdef __KERNEL__
 
@@ -331,9 +332,6 @@
 
 #ifdef __s390x__
 
-#define __load_psw(psw) \
-        __asm__ __volatile__("lpswe 0(%0)" : : "a" (&psw), "m" (psw) : "cc" );
-
 #define __ctl_load(array, low, high) ({ \
 	typedef struct { char _[sizeof(array)]; } addrtype; \
 	__asm__ __volatile__ ( \
@@ -390,9 +388,6 @@
 
 #else /* __s390x__ */
 
-#define __load_psw(psw) \
-	__asm__ __volatile__("lpsw 0(%0)" : : "a" (&psw) : "cc" );
-
 #define __ctl_load(array, low, high) ({ \
 	typedef struct { char _[sizeof(array)]; } addrtype; \
 	__asm__ __volatile__ ( \
@@ -451,6 +446,20 @@
 /* For spinlocks etc */
 #define local_irq_save(x)	((x) = local_irq_disable())
 
+/*
+ * Set the psw mask, except for the first byte, which is
+ * left unchanged by this function.
+ */
+static inline void
+__set_psw_mask(unsigned long mask)
+{
+	local_save_flags(mask);
+	__load_psw_mask(mask);
+}
+
+#define local_mcck_enable()  __set_psw_mask(PSW_KERNEL_BITS)
+#define local_mcck_disable() __set_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK)
+
 #ifdef CONFIG_SMP
 
 extern void smp_ctl_set_bit(int cr, int bit);
diff --git a/include/asm-s390/thread_info.h b/include/asm-s390/thread_info.h
index fe101d4..6c18a3f 100644
--- a/include/asm-s390/thread_info.h
+++ b/include/asm-s390/thread_info.h
@@ -96,6 +96,7 @@
 #define TIF_RESTART_SVC		4	/* restart svc with new svc number */
 #define TIF_SYSCALL_AUDIT	5	/* syscall auditing active */
 #define TIF_SINGLE_STEP		6	/* deliver sigtrap on return to user */
+#define TIF_MCCK_PENDING	7	/* machine check handling is pending */
 #define TIF_USEDFPU		16	/* FPU was used by this task this quantum (SMP) */
 #define TIF_POLLING_NRFLAG	17	/* true if poll_idle() is polling 
 					   TIF_NEED_RESCHED */
@@ -109,6 +110,7 @@
 #define _TIF_RESTART_SVC	(1<<TIF_RESTART_SVC)
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SINGLE_STEP	(1<<TIF_SINGLE_STEP)
+#define _TIF_MCCK_PENDING	(1<<TIF_MCCK_PENDING)
 #define _TIF_USEDFPU		(1<<TIF_USEDFPU)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_31BIT		(1<<TIF_31BIT)
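
The new TIF_MCCK_PENDING flag lets the machine check handler defer work to a
point where the kernel is in a sane state, typically the return-to-user path.
A hedged sketch of such a check follows; the handler name s390_handle_mcck()
is an assumption here and is not defined by these header changes:

	/* on the work-pending path, before returning to user space */
	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();	/* assumed deferred mcck handler */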