/*
 * SGI NMI support routines
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Copyright (c) 2009-2013 Silicon Graphics, Inc.  All Rights Reserved.
 * Copyright (c) Mike Travis
 */

#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/apic.h>
#include <asm/current.h>
#include <asm/kdebug.h>
#include <asm/local64.h>
#include <asm/nmi.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_mmrs.h>

/*
 * UV handler for NMI
 *
 * Handle system-wide NMI events generated by the global 'power nmi' command.
 *
 * Basic operation is to field the NMI interrupt on each cpu and wait
 * until all cpus have arrived in the nmi handler.  If some cpus do not
 * make it into the handler, try to force them in with the IPI(NMI) signal.
 *
 * We also have to minimize UV Hub MMR accesses as much as possible, as
 * they disrupt the UV Hub's primary mission of directing NumaLink traffic
 * and can cause system problems.
 *
 * To do this we register our primary NMI notifier on the NMI_UNKNOWN
 * chain.  This reduces the number of false NMI calls when the perf
 * tools are running, as they generate an enormous number of NMIs per
 * second (~4M/s for 1024 cpu threads).  Our secondary NMI handler is
 * very short as it only checks whether it has been "pinged" with the
 * IPI(NMI) signal as mentioned above, and does not read the UV Hub's MMR.
 *
 */

static struct uv_hub_nmi_s **uv_hub_nmi_list;

DEFINE_PER_CPU(struct uv_cpu_nmi_s, __uv_cpu_nmi);
EXPORT_PER_CPU_SYMBOL_GPL(__uv_cpu_nmi);

static unsigned long nmi_mmr;
static unsigned long nmi_mmr_clear;
static unsigned long nmi_mmr_pending;

static atomic_t	uv_in_nmi;
static atomic_t uv_nmi_cpu = ATOMIC_INIT(-1);
static atomic_t uv_nmi_cpus_in_nmi = ATOMIC_INIT(-1);
static atomic_t uv_nmi_slave_continue;
static cpumask_var_t uv_nmi_cpu_mask;

/* Values for uv_nmi_slave_continue */
#define SLAVE_CLEAR	0
#define SLAVE_CONTINUE	1
#define SLAVE_EXIT	2
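
/*
 * Handshake between the master cpu (first cpu into the NMI) and the
 * slaves uses uv_nmi_slave_continue: slaves spin while it reads
 * SLAVE_CLEAR; the master sets SLAVE_EXIT to release them into the
 * dump path, then resets it to SLAVE_CLEAR once every cpu has left
 * the handler (see uv_nmi_dump_state() and uv_nmi_sync_exit()).
 */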

/*
 * The default is for all stack dumps to go to both the console and the
 * log buffer.  Lower the level to send them to the log buffer only.
 */
static int uv_nmi_loglevel = 7;
module_param_named(dump_loglevel, uv_nmi_loglevel, int, 0644);

/*
 * The following values show statistics on how perf events are affecting
 * this system.
 */
static int param_get_local64(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "%lu\n", local64_read((local64_t *)kp->arg));
}

static int param_set_local64(const char *val, const struct kernel_param *kp)
{
	/* clear on any write */
	local64_set((local64_t *)kp->arg, 0);
	return 0;
}

static struct kernel_param_ops param_ops_local64 = {
	.get = param_get_local64,
	.set = param_set_local64,
};
#define param_check_local64(name, p) __param_check(name, p, local64_t)

static local64_t uv_nmi_count;
module_param_named(nmi_count, uv_nmi_count, local64, 0644);

static local64_t uv_nmi_misses;
module_param_named(nmi_misses, uv_nmi_misses, local64, 0644);

static local64_t uv_nmi_ping_count;
module_param_named(ping_count, uv_nmi_ping_count, local64, 0644);

static local64_t uv_nmi_ping_misses;
module_param_named(ping_misses, uv_nmi_ping_misses, local64, 0644);

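/*
 * A minimal usage sketch for the counters above (the paths assume this
 * code is built in, so the parameters land under
 * /sys/module/uv_nmi/parameters/):
 *
 *	cat /sys/module/uv_nmi/parameters/nmi_count	 # read the counter
 *	echo 0 > /sys/module/uv_nmi/parameters/nmi_count # any write clears it
 */
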
/*
 * The following values allow tuning for large systems under heavy loading.
 */
static int uv_nmi_initial_delay = 100;
module_param_named(initial_delay, uv_nmi_initial_delay, int, 0644);

static int uv_nmi_slave_delay = 100;
module_param_named(slave_delay, uv_nmi_slave_delay, int, 0644);

static int uv_nmi_loop_delay = 100;
module_param_named(loop_delay, uv_nmi_loop_delay, int, 0644);

static int uv_nmi_trigger_delay = 10000;
module_param_named(trigger_delay, uv_nmi_trigger_delay, int, 0644);

static int uv_nmi_wait_count = 100;
module_param_named(wait_count, uv_nmi_wait_count, int, 0644);

static int uv_nmi_retry_count = 500;
module_param_named(retry_count, uv_nmi_retry_count, int, 0644);

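/*
 * A hypothetical boot-time tuning example (built-in module parameters
 * take the "uv_nmi." prefix on the kernel command line):
 *
 *	uv_nmi.initial_delay=200 uv_nmi.retry_count=1000
 */
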
/*
 * Valid NMI Actions:
 *  "dump" - dump process stack for each cpu
 *  "ips"  - dump IP info for each cpu
 */
static char uv_nmi_action[8] = "dump";
module_param_string(action, uv_nmi_action, sizeof(uv_nmi_action), 0644);

static inline bool uv_nmi_action_is(const char *action)
{
	return (strncmp(uv_nmi_action, action, strlen(action)) == 0);
}

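/*
 * For example (hypothetical path, same built-in assumption as above):
 *
 *	echo ips > /sys/module/uv_nmi/parameters/action
 *
 * switches to the lighter-weight IP dump for subsequent NMI events.
 */
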
/* Set up which NMI support is present in the system */
static void uv_nmi_setup_mmrs(void)
{
	if (uv_read_local_mmr(UVH_NMI_MMRX_SUPPORTED)) {
		uv_write_local_mmr(UVH_NMI_MMRX_REQ,
					1UL << UVH_NMI_MMRX_REQ_SHIFT);
		nmi_mmr = UVH_NMI_MMRX;
		nmi_mmr_clear = UVH_NMI_MMRX_CLEAR;
		nmi_mmr_pending = 1UL << UVH_NMI_MMRX_SHIFT;
		pr_info("UV: SMI NMI support: %s\n", UVH_NMI_MMRX_TYPE);
	} else {
		nmi_mmr = UVH_NMI_MMR;
		nmi_mmr_clear = UVH_NMI_MMR_CLEAR;
		nmi_mmr_pending = 1UL << UVH_NMI_MMR_SHIFT;
		pr_info("UV: SMI NMI support: %s\n", UVH_NMI_MMR_TYPE);
	}
}

/* Read NMI MMR and check if NMI flag was set by BMC. */
static inline int uv_nmi_test_mmr(struct uv_hub_nmi_s *hub_nmi)
{
	hub_nmi->nmi_value = uv_read_local_mmr(nmi_mmr);
	atomic_inc(&hub_nmi->read_mmr_count);
	return !!(hub_nmi->nmi_value & nmi_mmr_pending);
}

static inline void uv_local_mmr_clear_nmi(void)
{
	uv_write_local_mmr(nmi_mmr_clear, nmi_mmr_pending);
}

/*
 * If this is the first cpu to enter the NMI handler on this hub, set the
 * hub_nmi "in_nmi" and "owner" values and return true.  If it is also the
 * first cpu on the whole system, set the global "in_nmi" flag.
 */
static int uv_set_in_nmi(int cpu, struct uv_hub_nmi_s *hub_nmi)
{
	int first = atomic_add_unless(&hub_nmi->in_nmi, 1, 1);

	if (first) {
		atomic_set(&hub_nmi->cpu_owner, cpu);
		if (atomic_add_unless(&uv_in_nmi, 1, 1))
			atomic_set(&uv_nmi_cpu, cpu);

		atomic_inc(&hub_nmi->nmi_count);
	}
	return first;
}

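/*
 * Note on uv_set_in_nmi() above: atomic_add_unless(v, 1, 1) succeeds
 * only for the 0 -> 1 transition, so exactly one cpu per hub (and one
 * cpu system-wide) wins the race and is recorded as the owner/master.
 */
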
/* Check if this is a system NMI event */
static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
{
	int cpu = smp_processor_id();
	int nmi = 0;

	local64_inc(&uv_nmi_count);
	uv_cpu_nmi.queries++;

	do {
		nmi = atomic_read(&hub_nmi->in_nmi);
		if (nmi)
			break;

		if (raw_spin_trylock(&hub_nmi->nmi_lock)) {

			/* check hub MMR NMI flag */
			if (uv_nmi_test_mmr(hub_nmi)) {
				uv_set_in_nmi(cpu, hub_nmi);
				nmi = 1;
				break;
			}

			/* MMR NMI flag is clear */
			raw_spin_unlock(&hub_nmi->nmi_lock);

		} else {
			/* wait a moment for the hub nmi locker to set flag */
			cpu_relax();
			udelay(uv_nmi_slave_delay);

			/* re-check hub in_nmi flag */
			nmi = atomic_read(&hub_nmi->in_nmi);
			if (nmi)
				break;
		}

		/* check if this BMC missed setting the MMR NMI flag */
		if (!nmi) {
			nmi = atomic_read(&uv_in_nmi);
			if (nmi)
				uv_set_in_nmi(cpu, hub_nmi);
		}

	} while (0);

	if (!nmi)
		local64_inc(&uv_nmi_misses);

	return nmi;
}

/* Need to reset the NMI MMR register, but only once per hub. */
static inline void uv_clear_nmi(int cpu)
{
	struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi;

	if (cpu == atomic_read(&hub_nmi->cpu_owner)) {
		atomic_set(&hub_nmi->cpu_owner, -1);
		atomic_set(&hub_nmi->in_nmi, 0);
		uv_local_mmr_clear_nmi();
		raw_spin_unlock(&hub_nmi->nmi_lock);
	}
}

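/*
 * Note: the raw_spin_unlock() above releases the hub nmi_lock that the
 * hub's owning cpu acquired with raw_spin_trylock() in uv_check_nmi().
 */
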
/* Print non-responding cpus */
static void uv_nmi_nr_cpus_pr(char *fmt)
{
	static char cpu_list[1024];
	int len = sizeof(cpu_list);
	int c = cpumask_weight(uv_nmi_cpu_mask);
	int n = cpulist_scnprintf(cpu_list, len, uv_nmi_cpu_mask);

	if (n >= len-1)
		strcpy(&cpu_list[len - 6], "...\n");

	printk(fmt, c, cpu_list);
}

/* Ping non-responding cpus, attempting to force them into the NMI handler */
static void uv_nmi_nr_cpus_ping(void)
{
	int cpu;

	for_each_cpu(cpu, uv_nmi_cpu_mask)
		atomic_set(&uv_cpu_nmi_per(cpu).pinging, 1);

	apic->send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI);
}

/* Clean up flags for cpus that ignored both NMI and ping */
static void uv_nmi_cleanup_mask(void)
{
	int cpu;

	for_each_cpu(cpu, uv_nmi_cpu_mask) {
		atomic_set(&uv_cpu_nmi_per(cpu).pinging, 0);
		atomic_set(&uv_cpu_nmi_per(cpu).state, UV_NMI_STATE_OUT);
		cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
	}
}

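/*
 * uv_nmi_wait_cpus() below is called twice by uv_nmi_wait(): first with
 * "first" set, to seed uv_nmi_cpu_mask from the online mask, and then
 * again after the stragglers have been pinged with IPI(NMI), resuming
 * with whatever cpus remain in the mask.
 */
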
/*
 * Loop waiting as cpus enter the NMI handler.  Returns the number of
 * cpus that never arrived.
 */
static int uv_nmi_wait_cpus(int first)
{
	int i, j, k, n = num_online_cpus();
	int last_k = 0, waiting = 0;

	if (first) {
		cpumask_copy(uv_nmi_cpu_mask, cpu_online_mask);
		k = 0;
	} else {
		k = n - cpumask_weight(uv_nmi_cpu_mask);
	}

	udelay(uv_nmi_initial_delay);
	for (i = 0; i < uv_nmi_retry_count; i++) {
		int loop_delay = uv_nmi_loop_delay;

		for_each_cpu(j, uv_nmi_cpu_mask) {
			if (atomic_read(&uv_cpu_nmi_per(j).state)) {
				cpumask_clear_cpu(j, uv_nmi_cpu_mask);
				if (++k >= n)
					break;
			}
		}
		if (k >= n) {		/* all in? */
			k = n;
			break;
		}
		if (last_k != k) {	/* abort if no new cpus coming in */
			last_k = k;
			waiting = 0;
		} else if (++waiting > uv_nmi_wait_count)
			break;

		/* extend delay if waiting only for cpu 0 */
		if (waiting && (n - k) == 1 &&
		    cpumask_test_cpu(0, uv_nmi_cpu_mask))
			loop_delay *= 100;

		udelay(loop_delay);
	}
	atomic_set(&uv_nmi_cpus_in_nmi, k);
	return n - k;
}

/* Wait until all slave cpus have entered the UV NMI handler */
static void uv_nmi_wait(int master)
{
	/* indicate this cpu is in */
	atomic_set(&uv_cpu_nmi.state, UV_NMI_STATE_IN);

	/* slave cpus just mark themselves in; only the master waits */
	if (!master)
		return;

	do {
		/* wait for all other cpus to gather here */
		if (!uv_nmi_wait_cpus(1))
			break;

		/* if not all made it in, send IPI NMI to them */
		uv_nmi_nr_cpus_pr(KERN_ALERT
			"UV: Sending NMI IPI to %d non-responding CPUs: %s\n");
		uv_nmi_nr_cpus_ping();

		/* if all cpus are now in, we are done */
		if (!uv_nmi_wait_cpus(0))
			break;

		uv_nmi_nr_cpus_pr(KERN_ALERT
			"UV: %d CPUs not in NMI loop: %s\n");
	} while (0);

	pr_alert("UV: %d of %d CPUs in NMI\n",
		atomic_read(&uv_nmi_cpus_in_nmi), num_online_cpus());
}

static void uv_nmi_dump_cpu_ip_hdr(void)
{
	printk(KERN_DEFAULT
		"\nUV: %4s %6s %-32s %s   (Note: PID 0 not listed)\n",
		"CPU", "PID", "COMMAND", "IP");
}

static void uv_nmi_dump_cpu_ip(int cpu, struct pt_regs *regs)
{
	printk(KERN_DEFAULT "UV: %4d %6d %-32.32s ",
		cpu, current->pid, current->comm);

	printk_address(regs->ip, 1);
}

/* Dump this cpu's state */
static void uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs)
{
	const char *dots = " ................................. ";

	if (uv_nmi_action_is("ips")) {
		if (cpu == 0)
			uv_nmi_dump_cpu_ip_hdr();

		if (current->pid != 0)
			uv_nmi_dump_cpu_ip(cpu, regs);

	} else if (uv_nmi_action_is("dump")) {
		printk(KERN_DEFAULT
			"UV:%sNMI process trace for CPU %d\n", dots, cpu);
		show_regs(regs);
	}
	atomic_set(&uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE);
}

/* Trigger a slave cpu to dump its state */
static void uv_nmi_trigger_dump(int cpu)
{
	int retry = uv_nmi_trigger_delay;

	if (atomic_read(&uv_cpu_nmi_per(cpu).state) != UV_NMI_STATE_IN)
		return;

	atomic_set(&uv_cpu_nmi_per(cpu).state, UV_NMI_STATE_DUMP);
	do {
		cpu_relax();
		udelay(10);
		if (atomic_read(&uv_cpu_nmi_per(cpu).state)
				!= UV_NMI_STATE_DUMP)
			return;
	} while (--retry > 0);

	pr_crit("UV: CPU %d stuck in process dump function\n", cpu);
	atomic_set(&uv_cpu_nmi_per(cpu).state, UV_NMI_STATE_DUMP_DONE);
}

/* Wait until all cpus ready to exit */
static void uv_nmi_sync_exit(int master)
{
	atomic_dec(&uv_nmi_cpus_in_nmi);
	if (master) {
		while (atomic_read(&uv_nmi_cpus_in_nmi) > 0)
			cpu_relax();
		atomic_set(&uv_nmi_slave_continue, SLAVE_CLEAR);
	} else {
		while (atomic_read(&uv_nmi_slave_continue))
			cpu_relax();
	}
}

/* Walk through cpu list and dump state of each */
static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master)
{
	if (master) {
		int tcpu;
		int ignored = 0;
		int saved_console_loglevel = console_loglevel;

		pr_alert("UV: tracing %s for %d CPUs from CPU %d\n",
			uv_nmi_action_is("ips") ? "IPs" : "processes",
			atomic_read(&uv_nmi_cpus_in_nmi), cpu);

		console_loglevel = uv_nmi_loglevel;
		atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
		for_each_online_cpu(tcpu) {
			if (cpumask_test_cpu(tcpu, uv_nmi_cpu_mask))
				ignored++;
			else if (tcpu == cpu)
				uv_nmi_dump_state_cpu(tcpu, regs);
			else
				uv_nmi_trigger_dump(tcpu);
		}
		if (ignored)
			printk(KERN_DEFAULT "UV: %d CPUs ignored NMI\n",
				ignored);

		console_loglevel = saved_console_loglevel;
		pr_alert("UV: process trace complete\n");
	} else {
		while (!atomic_read(&uv_nmi_slave_continue))
			cpu_relax();
		while (atomic_read(&uv_cpu_nmi.state) != UV_NMI_STATE_DUMP)
			cpu_relax();
		uv_nmi_dump_state_cpu(cpu, regs);
	}
	uv_nmi_sync_exit(master);
}

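/*
 * The cpus can spend a long time spinning in the NMI handler, so reset
 * the kernel's softlockup, clocksource, RCU stall and NMI watchdogs on
 * the way out to avoid false positives.
 */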
static void uv_nmi_touch_watchdogs(void)
{
	touch_softlockup_watchdog_sync();
	clocksource_touch_watchdog();
	rcu_cpu_stall_reset();
	touch_nmi_watchdog();
}

/*
 * UV NMI handler
 */
int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
{
	struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi;
	int cpu = smp_processor_id();
	int master = 0;
	unsigned long flags;

	local_irq_save(flags);

	/* If not a UV System NMI, ignore */
	if (!atomic_read(&uv_cpu_nmi.pinging) && !uv_check_nmi(hub_nmi)) {
		local_irq_restore(flags);
		return NMI_DONE;
	}

	/* Note whether this cpu was the first (the master) into the NMI */
	master = (atomic_read(&uv_nmi_cpu) == cpu);

	/* Pause as all cpus enter the NMI handler */
	uv_nmi_wait(master);

	/* Dump state of each cpu */
	if (uv_nmi_action_is("ips") || uv_nmi_action_is("dump"))
		uv_nmi_dump_state(cpu, regs, master);

	/* Clear per_cpu "in nmi" flag */
	atomic_set(&uv_cpu_nmi.state, UV_NMI_STATE_OUT);

	/* Clear MMR NMI flag on each hub */
	uv_clear_nmi(cpu);

	/* Clear global flags */
	if (master) {
		if (cpumask_weight(uv_nmi_cpu_mask))
			uv_nmi_cleanup_mask();
		atomic_set(&uv_nmi_cpus_in_nmi, -1);
		atomic_set(&uv_nmi_cpu, -1);
		atomic_set(&uv_in_nmi, 0);
	}

	uv_nmi_touch_watchdogs();
	local_irq_restore(flags);

	return NMI_HANDLED;
}

/*
 * NMI handler for pulling in CPUs when perf events are grabbing our NMI
 */
int uv_handle_nmi_ping(unsigned int reason, struct pt_regs *regs)
{
	int ret;

	uv_cpu_nmi.queries++;
	if (!atomic_read(&uv_cpu_nmi.pinging)) {
		local64_inc(&uv_nmi_ping_misses);
		return NMI_DONE;
	}

	uv_cpu_nmi.pings++;
	local64_inc(&uv_nmi_ping_count);
	ret = uv_handle_nmi(reason, regs);
	atomic_set(&uv_cpu_nmi.pinging, 0);
	return ret;
}

void uv_register_nmi_notifier(void)
{
	if (register_nmi_handler(NMI_UNKNOWN, uv_handle_nmi, 0, "uv"))
		pr_warn("UV: NMI handler failed to register\n");

	if (register_nmi_handler(NMI_LOCAL, uv_handle_nmi_ping, 0, "uvping"))
		pr_warn("UV: PING NMI handler failed to register\n");
}

void uv_nmi_init(void)
{
	unsigned int value;

	/*
	 * Unmask NMI on all cpus
	 */
	value = apic_read(APIC_LVT1) | APIC_DM_NMI;
	value &= ~APIC_LVT_MASKED;
	apic_write(APIC_LVT1, value);
}

void uv_nmi_setup(void)
{
	int size = sizeof(void *) * (1 << NODES_SHIFT);
	int cpu, nid;

	/* Set up hub nmi info */
	uv_nmi_setup_mmrs();
	uv_hub_nmi_list = kzalloc(size, GFP_KERNEL);
	pr_info("UV: NMI hub list @ 0x%p (%d)\n", uv_hub_nmi_list, size);
	BUG_ON(!uv_hub_nmi_list);
	size = sizeof(struct uv_hub_nmi_s);
	for_each_present_cpu(cpu) {
		nid = cpu_to_node(cpu);
		if (uv_hub_nmi_list[nid] == NULL) {
			uv_hub_nmi_list[nid] = kzalloc_node(size,
							    GFP_KERNEL, nid);
			BUG_ON(!uv_hub_nmi_list[nid]);
			raw_spin_lock_init(&(uv_hub_nmi_list[nid]->nmi_lock));
			atomic_set(&uv_hub_nmi_list[nid]->cpu_owner, -1);
		}
		uv_hub_nmi_per(cpu) = uv_hub_nmi_list[nid];
	}
	alloc_cpumask_var(&uv_nmi_cpu_mask, GFP_KERNEL);
	BUG_ON(!uv_nmi_cpu_mask);
}