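/*
 * NMI handling for SGI IP27 (Origin 2000 / Onyx2 class) systems.
 *
 * Each cpu slice gets nmi_dump() installed as its NMI handler in the
 * PROM NMI vector area.  On an NMI, the first cpu to reach cont_nmi_dump()
 * dumps the register state the PROM saved for every cpu in the system and
 * then resets the machine.
 */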
#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/nodemask.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/atomic.h>
#include <asm/sn/types.h>
#include <asm/sn/addrs.h>
#include <asm/sn/nmi.h>
#include <asm/sn/arch.h>
#include <asm/sn/sn0/hub.h>

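/*
 * Number of cpu slices to dump per node: the per-node CNODE_NUM_CPUS()
 * variant is compiled out in favour of the fixed CPUS_PER_NODE value.
 */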
#if 0
#define NODE_NUM_CPUS(n)	CNODE_NUM_CPUS(n)
#else
#define NODE_NUM_CPUS(n)	CPUS_PER_NODE
#endif

#define CNODEID_NONE (cnodeid_t)-1

typedef unsigned long machreg_t;

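/* Taken (and never released) by the first cpu to enter cont_nmi_dump(). */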
static arch_spinlock_t nmi_lock = __ARCH_SPIN_LOCK_UNLOCKED;

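/*
 * Per-cpu NMI entry point; installed in the PROM NMI vector area by
 * install_cpu_nmi_handler() below, it simply hands off to cont_nmi_dump().
 */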
/*
 * Let's see what else we need to do here. Set up sp, gp?
 */
void nmi_dump(void)
{
	void cont_nmi_dump(void);

	cont_nmi_dump();
}

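/*
 * Register nmi_dump() as the NMI handler for one cpu slice of this node:
 * fill in the PROM NMI area with the magic value, the handler address and
 * its bitwise complement (call_addr_c).  A slice that already has a
 * handler installed is left alone.
 */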
void install_cpu_nmi_handler(int slice)
{
	nmi_t *nmi_addr;

	nmi_addr = (nmi_t *)NMI_ADDR(get_nasid(), slice);
	if (nmi_addr->call_addr)
		return;
	nmi_addr->magic = NMI_MAGIC;
	nmi_addr->call_addr = (void *)nmi_dump;
	nmi_addr->call_addr_c =
		(void *)(~((unsigned long)(nmi_addr->call_addr)));
	nmi_addr->call_parm = 0;
}

/*
 * Copy the cpu registers which have been saved in the IP27prom format
 * into the eframe format for the node under consideration.
 */

void nmi_cpu_eframe_save(nasid_t nasid, int slice)
{
	struct reg_struct *nr;
	int i;

	/* Get the pointer to the current cpu's register set. */
	nr = (struct reg_struct *)
		(TO_UNCAC(TO_NODE(nasid, IP27_NMI_KREGS_OFFSET)) +
		slice * IP27_NMI_KREGS_CPU_SIZE);

	printk("NMI nasid %d: slice %d\n", nasid, slice);

	/*
	 * Saved main processor registers
	 */
	for (i = 0; i < 32; ) {
		if ((i % 4) == 0)
			printk("$%2d   :", i);
		printk(" %016lx", nr->gpr[i]);

		i++;
		if ((i % 4) == 0)
			printk("\n");
	}

	printk("Hi    : (value lost)\n");
	printk("Lo    : (value lost)\n");

	/*
	 * Saved cp0 registers
	 */
	printk("epc   : %016lx %pS\n", nr->epc, (void *) nr->epc);
	printk("%s\n", print_tainted());
	printk("ErrEPC: %016lx %pS\n", nr->error_epc, (void *) nr->error_epc);
	printk("ra    : %016lx %pS\n", nr->gpr[31], (void *) nr->gpr[31]);
	printk("Status: %08lx ", nr->sr);

	if (nr->sr & ST0_KX)
		printk("KX ");
	if (nr->sr & ST0_SX)
		printk("SX ");
	if (nr->sr & ST0_UX)
		printk("UX ");

	switch (nr->sr & ST0_KSU) {
	case KSU_USER:
		printk("USER ");
		break;
	case KSU_SUPERVISOR:
		printk("SUPERVISOR ");
		break;
	case KSU_KERNEL:
		printk("KERNEL ");
		break;
	default:
		printk("BAD_MODE ");
		break;
	}

	if (nr->sr & ST0_ERL)
		printk("ERL ");
	if (nr->sr & ST0_EXL)
		printk("EXL ");
	if (nr->sr & ST0_IE)
		printk("IE ");
	printk("\n");

	printk("Cause : %08lx\n", nr->cause);
	printk("PrId  : %08x\n", read_c0_prid());
	printk("BadVA : %016lx\n", nr->badva);
	printk("CErr  : %016lx\n", nr->cache_err);
	printk("NMI_SR: %016lx\n", nr->nmi_sr);

	printk("\n");
}

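/*
 * Dump this slice's hub PI interrupt mask registers together with the
 * interrupt pending registers of the node.
 */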
void nmi_dump_hub_irq(nasid_t nasid, int slice)
{
	hubreg_t mask0, mask1, pend0, pend1;

	if (slice == 0) {				/* Slice A */
		mask0 = REMOTE_HUB_L(nasid, PI_INT_MASK0_A);
		mask1 = REMOTE_HUB_L(nasid, PI_INT_MASK1_A);
	} else {					/* Slice B */
		mask0 = REMOTE_HUB_L(nasid, PI_INT_MASK0_B);
		mask1 = REMOTE_HUB_L(nasid, PI_INT_MASK1_B);
	}

	pend0 = REMOTE_HUB_L(nasid, PI_INT_PEND0);
	pend1 = REMOTE_HUB_L(nasid, PI_INT_PEND1);

	printk("PI_INT_MASK0: %16Lx PI_INT_MASK1: %16Lx\n", mask0, mask1);
	printk("PI_INT_PEND0: %16Lx PI_INT_PEND1: %16Lx\n", pend0, pend1);
	printk("\n\n");
}

/*
 * Save the registers, in the format above, together with the hub
 * interrupt state, for every cpu (slice) on the given node.
 */
void nmi_node_eframe_save(cnodeid_t cnode)
{
	nasid_t nasid;
	int slice;

	/* Make sure that we have a valid node */
	if (cnode == CNODEID_NONE)
		return;

	nasid = COMPACT_TO_NASID_NODEID(cnode);
	if (nasid == INVALID_NASID)
		return;

	/* Save the registers into eframe for each cpu */
	for (slice = 0; slice < NODE_NUM_CPUS(slice); slice++) {
		nmi_cpu_eframe_save(nasid, slice);
		nmi_dump_hub_irq(nasid, slice);
	}
}

/*
 * Save the nmi cpu registers for all cpus in the system.
 */
void
nmi_eframes_save(void)
{
	cnodeid_t cnode;

	for_each_online_node(cnode)
		nmi_node_eframe_save(cnode);
}

void
cont_nmi_dump(void)
{
#ifndef REAL_NMI_SIGNAL
	static atomic_t nmied_cpus = ATOMIC_INIT(0);

	atomic_inc(&nmied_cpus);
#endif
	/*
	 * Only allow 1 cpu to proceed
	 */
	arch_spin_lock(&nmi_lock);

#ifdef REAL_NMI_SIGNAL
	/*
	 * Wait up to 15 seconds for the other cpus to respond to the NMI.
	 * If a cpu has not responded after 10 sec, send it 1 additional NMI.
	 * This is for two reasons:
	 *	- sometimes the MMSC fails to NMI all cpus.
	 *	- on a 512p SN0 system, the MMSC will only send NMIs to
	 *	  half the cpus. Unfortunately, we don't know which cpus may be
	 *	  NMIed - it depends on how the site chooses to configure the system.
	 *
	 * Note: it has been measured that it takes the MMSC up to 2.3 secs to
	 * send NMIs to all cpus on a 256p system.
	 */
	for (i = 0; i < 1500; i++) {
		for_each_online_node(node)
			if (NODEPDA(node)->dump_count == 0)
				break;
		if (node == MAX_NUMNODES)
			break;
		if (i == 1000) {
			for_each_online_node(node)
				if (NODEPDA(node)->dump_count == 0) {
					cpu = cpumask_first(cpumask_of_node(node));
					for (n = 0; n < CNODE_NUM_CPUS(node); cpu++, n++) {
						CPUMASK_SETB(nmied_cpus, cpu);
						/*
						 * cputonasid, cputoslice
						 * needs kernel cpuid
						 */
						SEND_NMI((cputonasid(cpu)), (cputoslice(cpu)));
					}
				}

		}
		udelay(10000);
	}
#else
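	/* Wait until every online cpu has taken its NMI and checked in. */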
	while (atomic_read(&nmied_cpus) != num_online_cpus());
#endif

	/*
	 * Save the nmi cpu registers for all cpus in the eframe format.
	 */
	nmi_eframes_save();
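	/* Everything is saved; reset the machine through the local hub's NI port. */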
	LOCAL_HUB_S(NI_PORT_RESET, NPR_PORTRESET | NPR_LOCALRESET);
}