/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include "../kernel/entry.h"

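/*
 * __FAIL_ADDR_MASK extracts the page-aligned failing address from the
 * translation-exception identification (TEID) in regs->int_parm_long.
 * __SUBCODE_MASK and __PF_RES_FIELD belong to the pfault handshaking
 * code at the end of this file.
 */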
#define __FAIL_ADDR_MASK -4096L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL

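/*
 * Private fault codes returned by do_exception() in addition to the
 * generic VM_FAULT_* bits; do_fault_error() maps them to the
 * appropriate signal or kernel exception handling.
 */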
#define VM_FAULT_BADCONTEXT	0x010000
#define VM_FAULT_BADMAP		0x020000
#define VM_FAULT_BADACCESS	0x040000
#define VM_FAULT_SIGNAL		0x080000
#define VM_FAULT_PFAULT		0x100000

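/*
 * With facility bit 75 installed the translation-exception identification
 * contains fetch/store-indication bits; do_exception() checks the bits
 * selected by store_indication to decide whether FAULT_FLAG_WRITE has to
 * be set for the failing access.
 */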
static unsigned long store_indication __read_mostly;

static int __init fault_init(void)
{
	if (test_facility(75))
		store_indication = 0xc00;
	return 0;
}
early_initcall(fault_init);

static inline int notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}
	return ret;
}


/*
 * Unlock any spinlocks which will prevent us from getting the
 * message out.
 */
void bust_spinlocks(int yes)
{
	if (yes) {
		oops_in_progress = 1;
	} else {
		int loglevel_save = console_loglevel;
		console_unblank();
		oops_in_progress = 0;
		/*
		 * OK, the message is on the console.  Now we call printk()
		 * without oops_in_progress set so that printk will give klogd
		 * a poke.  Hold onto your hats...
		 */
		console_loglevel = 15;
		printk(" ");
		console_loglevel = loglevel_save;
	}
}

/*
 * Returns the address space associated with the fault.
 * Returns 0 for kernel space and 1 for user space.
 */
static inline int user_space_fault(struct pt_regs *regs)
{
	unsigned long trans_exc_code;

	/*
	 * The lowest two bits of the translation exception
	 * identification indicate which paging table was used.
	 */
	trans_exc_code = regs->int_parm_long & 3;
	if (trans_exc_code == 3) /* home space -> kernel */
		return 0;
	if (user_mode(regs))
		return 1;
	if (trans_exc_code == 2) /* secondary space -> set_fs */
		return current->thread.mm_segment.ar4;
	if (current->flags & PF_VCPU)
		return 1;
	return 0;
}

static int bad_address(void *p)
{
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}

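/*
 * Walk the page table for @address under the given @asce and print one
 * entry per translation level (region first/second/third, segment, page).
 * Table entries that cannot be read safely are reported as "BAD".
 */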
static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long *table = __va(asce & PAGE_MASK);

	pr_alert("AS:%016lx ", asce);
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table = table + ((address >> 53) & 0x7ff);
		if (bad_address(table))
			goto bad;
		pr_cont("R1:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION2:
		table = table + ((address >> 42) & 0x7ff);
		if (bad_address(table))
			goto bad;
		pr_cont("R2:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION3:
		table = table + ((address >> 31) & 0x7ff);
		if (bad_address(table))
			goto bad;
		pr_cont("R3:%016lx ", *table);
		if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_SEGMENT:
		table = table + ((address >> 20) & 0x7ff);
		if (bad_address(table))
			goto bad;
		pr_cont("S:%016lx ", *table);
		if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
	}
	table = table + ((address >> 12) & 0xff);
	if (bad_address(table))
		goto bad;
	pr_cont("P:%016lx ", *table);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}

static void dump_fault_info(struct pt_regs *regs)
{
	unsigned long asce;

	pr_alert("Failing address: %016lx TEID: %016lx\n",
		 regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
	pr_alert("Fault in ");
	switch (regs->int_parm_long & 3) {
	case 3:
		pr_cont("home space ");
		break;
	case 2:
		pr_cont("secondary space ");
		break;
	case 1:
		pr_cont("access register ");
		break;
	case 0:
		pr_cont("primary space ");
		break;
	}
	pr_cont("mode while using ");
	if (!user_space_fault(regs)) {
		asce = S390_lowcore.kernel_asce;
		pr_cont("kernel ");
	}
#ifdef CONFIG_PGSTE
	else if ((current->flags & PF_VCPU) && S390_lowcore.gmap) {
		struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
		asce = gmap->asce;
		pr_cont("gmap ");
	}
#endif
	else {
		asce = S390_lowcore.user_asce;
		pr_cont("user ");
	}
	pr_cont("ASCE.\n");
	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
}

int show_unhandled_signals = 1;

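/*
 * Print a rate-limited report about a fault that is about to be delivered
 * to user space as a signal. Nothing is printed if the task handles the
 * signal itself or if show_unhandled_signals is disabled.
 */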
void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
	       regs->int_code & 0xffff, regs->int_code >> 17);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr);
	printk(KERN_CONT "\n");
	if (is_mm_fault)
		dump_fault_info(regs);
	show_regs(regs);
}

/*
 * Send SIGSEGV to task. This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
	struct siginfo si;

	report_user_fault(regs, SIGSEGV, 1);
	si.si_signo = SIGSEGV;
	si.si_errno = 0;
	si.si_code = si_code;
	si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
	force_sig_info(SIGSEGV, &si, current);
}

static noinline void do_no_context(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	/* Are we prepared to handle this kernel fault? */
	fixup = search_exception_tables(regs->psw.addr);
	if (fixup) {
		regs->psw.addr = extable_fixup(fixup);
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (!user_space_fault(regs))
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " in virtual kernel address space\n");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " in virtual user address space\n");
	dump_fault_info(regs);
	die(regs, "Oops");
	do_exit(SIGKILL);
}

static noinline void do_low_address(struct pt_regs *regs)
{
	/* Low-address protection hit in kernel mode means
	   NULL pointer write access in kernel mode. */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Low-address protection hit in user mode 'cannot happen'. */
		die(regs, "Low-address protection");
		do_exit(SIGKILL);
	}

	do_no_context(regs);
}

static noinline void do_sigbus(struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	struct siginfo si;

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	si.si_signo = SIGBUS;
	si.si_errno = 0;
	si.si_code = BUS_ADRERR;
	si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
	force_sig_info(SIGBUS, &si, tsk);
}

static noinline void do_fault_error(struct pt_regs *regs, int fault)
{
	int si_code;

	switch (fault) {
	case VM_FAULT_BADACCESS:
	case VM_FAULT_BADMAP:
		/* Bad memory access. Check if it is kernel or user space. */
		if (user_mode(regs)) {
			/* User mode accesses just cause a SIGSEGV */
			si_code = (fault == VM_FAULT_BADMAP) ?
				SEGV_MAPERR : SEGV_ACCERR;
			do_sigsegv(regs, si_code);
			return;
		}
	case VM_FAULT_BADCONTEXT:
	case VM_FAULT_PFAULT:
		do_no_context(regs);
		break;
	case VM_FAULT_SIGNAL:
		if (!user_mode(regs))
			do_no_context(regs);
		break;
	default: /* fault & VM_FAULT_ERROR */
		if (fault & VM_FAULT_OOM) {
			if (!user_mode(regs))
				do_no_context(regs);
			else
				pagefault_out_of_memory();
		} else if (fault & VM_FAULT_SIGSEGV) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigsegv(regs, SEGV_MAPERR);
		} else if (fault & VM_FAULT_SIGBUS) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigbus(regs);
		} else
			BUG();
		break;
	}
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static inline int do_exception(struct pt_regs *regs, int access)
{
#ifdef CONFIG_PGSTE
	struct gmap *gmap;
#endif
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long trans_exc_code;
	unsigned long address;
	unsigned int flags;
	int fault;

	tsk = current;
	/*
	 * The instruction that caused the program check has
	 * been nullified. Don't signal single step via SIGTRAP.
	 */
	clear_pt_regs_flag(regs, PIF_PER_TRAP);

	if (notify_page_fault(regs))
		return 0;

	mm = tsk->mm;
	trans_exc_code = regs->int_parm_long;

	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a
	 * user context.
	 */
	fault = VM_FAULT_BADCONTEXT;
	if (unlikely(!user_space_fault(regs) || faulthandler_disabled() || !mm))
		goto out;

	address = trans_exc_code & __FAIL_ADDR_MASK;
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
		flags |= FAULT_FLAG_WRITE;
	down_read(&mm->mmap_sem);

#ifdef CONFIG_PGSTE
	gmap = (current->flags & PF_VCPU) ?
		(struct gmap *) S390_lowcore.gmap : NULL;
	if (gmap) {
		current->thread.gmap_addr = address;
		current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
		current->thread.gmap_int_code = regs->int_code & 0xffff;
		address = __gmap_translate(gmap, address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (gmap->pfault_enabled)
			flags |= FAULT_FLAG_RETRY_NOWAIT;
	}
#endif

retry:
	fault = VM_FAULT_BADMAP;
	vma = find_vma(mm, address);
	if (!vma)
		goto out_up;

	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_up;
		if (expand_stack(vma, address))
			goto out_up;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
	fault = VM_FAULT_BADACCESS;
	if (unlikely(!(vma->vm_flags & access)))
		goto out_up;

	if (is_vm_hugetlb_page(vma))
		address &= HPAGE_MASK;
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);
	/* No reason to continue if interrupted by SIGKILL. */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
		fault = VM_FAULT_SIGNAL;
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_up;
		goto out;
	}
	if (unlikely(fault & VM_FAULT_ERROR))
		goto out_up;

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
#ifdef CONFIG_PGSTE
			if (gmap && (flags & FAULT_FLAG_RETRY_NOWAIT)) {
				/* FAULT_FLAG_RETRY_NOWAIT has been set,
				 * mmap_sem has not been released */
				current->thread.gmap_pfault = 1;
				fault = VM_FAULT_PFAULT;
				goto out_up;
			}
#endif
			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation. */
			flags &= ~(FAULT_FLAG_ALLOW_RETRY |
				   FAULT_FLAG_RETRY_NOWAIT);
			flags |= FAULT_FLAG_TRIED;
			down_read(&mm->mmap_sem);
			goto retry;
		}
	}
#ifdef CONFIG_PGSTE
	if (gmap) {
		address = __gmap_link(gmap, current->thread.gmap_addr,
				      address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			goto out_up;
		}
	}
#endif
	fault = 0;
out_up:
	up_read(&mm->mmap_sem);
out:
	return fault;
}

void do_protection_exception(struct pt_regs *regs)
{
	unsigned long trans_exc_code;
	int fault;

	trans_exc_code = regs->int_parm_long;
	/*
	 * Protection exceptions are suppressing, decrement psw address.
	 * The exception to this rule are aborted transactions, for these
	 * the PSW already points to the correct location.
	 */
	if (!(regs->int_code & 0x200))
		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
	/*
	 * Check for low-address protection.  This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!(trans_exc_code & 4))) {
		do_low_address(regs);
		return;
	}
	fault = do_exception(regs, VM_WRITE);
	if (unlikely(fault))
		do_fault_error(regs, fault);
}
NOKPROBE_SYMBOL(do_protection_exception);

void do_dat_exception(struct pt_regs *regs)
{
	int access, fault;

	access = VM_READ | VM_EXEC | VM_WRITE;
	fault = do_exception(regs, access);
	if (unlikely(fault))
		do_fault_error(regs, fault);
}
NOKPROBE_SYMBOL(do_dat_exception);

#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page faults routines.
 */
static int pfault_disable;

static int __init nopfault(char *str)
{
	pfault_disable = 1;
	return 1;
}

__setup("nopfault", nopfault);

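/*
 * Request block for DIAG 0x258, the hypervisor interface used to enable
 * (function code 0, pfault_init()) and disable (function code 1,
 * pfault_fini()) pfault handling for this cpu.
 */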
struct pfault_refbk {
	u16 refdiagc;
	u16 reffcode;
	u16 refdwlen;
	u16 refversn;
	u64 refgaddr;
	u64 refselmk;
	u64 refcmpmk;
	u64 reserved;
} __attribute__ ((packed, aligned(8)));

int pfault_init(void)
{
	struct pfault_refbk refbk = {
		.refdiagc = 0x258,
		.reffcode = 0,
		.refdwlen = 5,
		.refversn = 2,
		.refgaddr = __LC_LPP,
		.refselmk = 1ULL << 48,
		.refcmpmk = 1ULL << 48,
		.reserved = __PF_RES_FIELD };
	int rc;

	if (pfault_disable)
		return -1;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%1,%0,0x258\n"
		"0:	j	2f\n"
		"1:	la	%0,8\n"
		"2:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
	return rc;
}

void pfault_fini(void)
{
	struct pfault_refbk refbk = {
		.refdiagc = 0x258,
		.reffcode = 1,
		.refdwlen = 5,
		.refversn = 2,
	};

	if (pfault_disable)
		return;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%0,0,0x258\n"
		"0:	nopr	%%r7\n"
		EX_TABLE(0b,0b)
		: : "a" (&refbk), "m" (refbk) : "cc");
}

static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);

#define PF_COMPLETE	0x0080

/*
 * The mechanism of our pfault code: if Linux is running as guest, runs a user
 * space process and the user space process accesses a page that the host has
 * paged out we get a pfault interrupt.
 *
 * This allows us, within the guest, to schedule a different process. Without
 * this mechanism the host would have to suspend the whole virtual cpu until
 * the page has been paged in.
 *
 * So when we get such an interrupt then we set the state of the current task
 * to uninterruptible and also set the need_resched flag. Both happens within
 * interrupt context(!). If we later on want to return to user space we
 * recognize the need_resched flag and then call schedule().  It's not very
 * obvious how this works...
 *
 * Of course we have a lot of additional fun with the completion interrupt (->
 * host signals that a page of a process has been paged in and the process can
 * continue to run). This interrupt can arrive on any cpu and, since we have
 * virtual cpus, actually appear before the interrupt that signals that a page
 * is missing.
 */
static void pfault_interrupt(struct ext_code ext_code,
			     unsigned int param32, unsigned long param64)
{
	struct task_struct *tsk;
	__u16 subcode;
	pid_t pid;

	/*
	 * Get the external interruption subcode & pfault initial/completion
	 * signal bit. VM stores this in the 'cpu address' field associated
	 * with the external interrupt.
	 */
	subcode = ext_code.subcode;
	if ((subcode & 0xff00) != __SUBCODE_MASK)
		return;
	inc_irq_stat(IRQEXT_PFL);
	/* Get the token (= pid of the affected task). */
	pid = param64 & LPP_PFAULT_PID_MASK;
	rcu_read_lock();
	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return;
	spin_lock(&pfault_lock);
	if (subcode & PF_COMPLETE) {
		/* signal bit is set -> a page has been swapped in by VM */
		if (tsk->thread.pfault_wait == 1) {
			/* Initial interrupt was faster than the completion
			 * interrupt. pfault_wait is valid. Set pfault_wait
			 * back to zero and wake up the process. This can
			 * safely be done because the task is still sleeping
			 * and can't produce new pfaults. */
			tsk->thread.pfault_wait = 0;
			list_del(&tsk->thread.list);
			wake_up_process(tsk);
			put_task_struct(tsk);
		} else {
			/* Completion interrupt was faster than initial
			 * interrupt. Set pfault_wait to -1 so the initial
			 * interrupt doesn't put the task to sleep.
			 * If the task is not running, ignore the completion
			 * interrupt since it must be a leftover of a PFAULT
			 * CANCEL operation which didn't remove all pending
			 * completion interrupts. */
			if (tsk->state == TASK_RUNNING)
				tsk->thread.pfault_wait = -1;
		}
	} else {
		/* signal bit not set -> a real page is missing. */
		if (WARN_ON_ONCE(tsk != current))
			goto out;
		if (tsk->thread.pfault_wait == 1) {
			/* Already on the list with a reference: put to sleep */
			goto block;
		} else if (tsk->thread.pfault_wait == -1) {
			/* Completion interrupt was faster than the initial
			 * interrupt (pfault_wait == -1). Set pfault_wait
			 * back to zero and exit. */
			tsk->thread.pfault_wait = 0;
		} else {
			/* Initial interrupt arrived before completion
			 * interrupt. Let the task sleep.
			 * An extra task reference is needed since a different
			 * cpu may set the task state to TASK_RUNNING again
			 * before the scheduler is reached. */
			get_task_struct(tsk);
			tsk->thread.pfault_wait = 1;
			list_add(&tsk->thread.list, &pfault_list);
block:
			/* Since this must be a userspace fault, there
			 * is no kernel task state to trample. Rely on the
			 * return to userspace schedule() to block. */
			__set_current_state(TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
		}
	}
out:
	spin_unlock(&pfault_lock);
	put_task_struct(tsk);
}

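/*
 * CPU hotplug "dead" callback: wake up and remove every task still
 * waiting on the pfault list so that no task stays blocked after the
 * cpu has gone away.
 */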
static int pfault_cpu_dead(unsigned int cpu)
{
	struct thread_struct *thread, *next;
	struct task_struct *tsk;

	spin_lock_irq(&pfault_lock);
	list_for_each_entry_safe(thread, next, &pfault_list, list) {
		thread->pfault_wait = 0;
		list_del(&thread->list);
		tsk = container_of(thread, struct task_struct, thread);
		wake_up_process(tsk);
		put_task_struct(tsk);
	}
	spin_unlock_irq(&pfault_lock);
	return 0;
}

static int __init pfault_irq_init(void)
{
	int rc;

	rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
	if (rc)
		goto out_extint;
	rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
	if (rc)
		goto out_pfault;
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
				  NULL, pfault_cpu_dead);
	return 0;

out_pfault:
	unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
out_extint:
	pfault_disable = 1;
	return rc;
}
early_initcall(pfault_irq_init);

#endif /* CONFIG_PFAULT */