Ralf Baechle | 41c594a | 2006-04-05 09:45:45 +0100 | [diff] [blame] | 1 | /* |
| 2 | * Malta Platform-specific hooks for SMP operation |
| 3 | */ |
Ralf Baechle | 45a98eb | 2007-08-06 16:32:20 +0100 | [diff] [blame] | 4 | #include <linux/irq.h> |
Ralf Baechle | 57a2050 | 2007-03-04 18:27:34 +0000 | [diff] [blame] | 5 | #include <linux/init.h> |
Ralf Baechle | 41c594a | 2006-04-05 09:45:45 +0100 | [diff] [blame] | 6 | |
Ralf Baechle | 57a2050 | 2007-03-04 18:27:34 +0000 | [diff] [blame] | 7 | #include <asm/mipsregs.h> |
| 8 | #include <asm/mipsmtregs.h> |
| 9 | #include <asm/smtc.h> |
Ralf Baechle | 41c594a | 2006-04-05 09:45:45 +0100 | [diff] [blame] | 10 | #include <asm/smtc_ipi.h> |
Ralf Baechle | 41c594a | 2006-04-05 09:45:45 +0100 | [diff] [blame] | 11 | |
| 12 | /* VPE/SMP Prototype implements platform interfaces directly */ |
Ralf Baechle | 41c594a | 2006-04-05 09:45:45 +0100 | [diff] [blame] | 13 | |
| 14 | /* |
| 15 | * Cause the specified action to be performed on a targeted "CPU" |
| 16 | */ |
| 17 | |
Ralf Baechle | 87353d8 | 2007-11-19 12:23:51 +0000 | [diff] [blame] | 18 | static void msmtc_send_ipi_single(int cpu, unsigned int action) |
Ralf Baechle | 41c594a | 2006-04-05 09:45:45 +0100 | [diff] [blame] | 19 | { |
Ralf Baechle | 57a2050 | 2007-03-04 18:27:34 +0000 | [diff] [blame] | 20 | /* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */ |
Ralf Baechle | 41c594a | 2006-04-05 09:45:45 +0100 | [diff] [blame] | 21 | smtc_send_ipi(cpu, LINUX_SMP_IPI, action); |
Ralf Baechle | 41c594a | 2006-04-05 09:45:45 +0100 | [diff] [blame] | 22 | } |
| 23 | |
Rusty Russell | 48a048f | 2009-09-24 09:34:44 -0600 | [diff] [blame] | 24 | static void msmtc_send_ipi_mask(const struct cpumask *mask, unsigned int action) |
Ralf Baechle | 41c594a | 2006-04-05 09:45:45 +0100 | [diff] [blame] | 25 | { |
Ralf Baechle | 87353d8 | 2007-11-19 12:23:51 +0000 | [diff] [blame] | 26 | unsigned int i; |
| 27 | |
Rusty Russell | 48a048f | 2009-09-24 09:34:44 -0600 | [diff] [blame] | 28 | for_each_cpu(i, mask) |
Ralf Baechle | 87353d8 | 2007-11-19 12:23:51 +0000 | [diff] [blame] | 29 | msmtc_send_ipi_single(i, action); |
Ralf Baechle | 41c594a | 2006-04-05 09:45:45 +0100 | [diff] [blame] | 30 | } |
| 31 | |
| 32 | /* |
| 33 | * Post-config but pre-boot cleanup entry point |
| 34 | */ |
/*
 * Post-config, pre-boot per-"CPU" setup: restrict which interrupt
 * lines a secondary VPE will take, then fall through to the common
 * SMTC secondary initialization.
 */
static void __cpuinit msmtc_init_secondary(void)
{
	/* Forward declaration; the definition lives in the common SMTC code. */
	void smtc_init_secondary(void);
	int myvpe;

	/*
	 * Don't enable Malta I/O interrupts (IP2) for secondary VPEs:
	 * by convention only VPE0 services the combined Malta I/O
	 * interrupt line.  Secondaries get only the timer, IPI and
	 * (if present) performance counter interrupts.
	 */
	myvpe = read_c0_tcbind() & TCBIND_CURVPE;
	if (myvpe != 0) {
		/* Ideally, this should be done only once per VPE, but... */
		clear_c0_status(ST0_IM);
		/* Re-enable only Status.IM bits for timer and IPI IRQs. */
		set_c0_status((0x100 << cp0_compare_irq)
				| (0x100 << MIPS_CPU_IPI_IRQ));
		/* cp0_perfcount_irq < 0 means no perf counter interrupt. */
		if (cp0_perfcount_irq >= 0)
			set_c0_status(0x100 << cp0_perfcount_irq);
	}

	smtc_init_secondary();
}
| 53 | |
| 54 | /* |
| 55 | * Platform "CPU" startup hook |
| 56 | */ |
| 57 | static void __cpuinit msmtc_boot_secondary(int cpu, struct task_struct *idle) |
| 58 | { |
| 59 | smtc_boot_secondary(cpu, idle); |
| 60 | } |
| 61 | |
| 62 | /* |
| 63 | * SMP initialization finalization entry point |
| 64 | */ |
| 65 | static void __cpuinit msmtc_smp_finish(void) |
| 66 | { |
| 67 | smtc_smp_finish(); |
| 68 | } |
| 69 | |
| 70 | /* |
| 71 | * Hook for after all CPUs are online |
| 72 | */ |
| 73 | |
/* Hook for after all CPUs are online: nothing to do on Malta/SMTC. */
static void msmtc_cpus_done(void)
{
}
| 77 | |
| 78 | /* |
| 79 | * Platform SMP pre-initialization |
| 80 | * |
| 81 | * As noted above, we can assume a single CPU for now |
| 82 | * but it may be multithreaded. |
| 83 | */ |
| 84 | |
Ralf Baechle | 87353d8 | 2007-11-19 12:23:51 +0000 | [diff] [blame] | 85 | static void __init msmtc_smp_setup(void) |
Ralf Baechle | 41c594a | 2006-04-05 09:45:45 +0100 | [diff] [blame] | 86 | { |
Kevin D. Kissell | 8531a35 | 2008-09-09 21:48:52 +0200 | [diff] [blame] | 87 | /* |
| 88 | * we won't get the definitive value until |
| 89 | * we've run smtc_prepare_cpus later, but |
| 90 | * we would appear to need an upper bound now. |
| 91 | */ |
| 92 | smp_num_siblings = smtc_build_cpu_map(0); |
Ralf Baechle | 41c594a | 2006-04-05 09:45:45 +0100 | [diff] [blame] | 93 | } |
| 94 | |
Ralf Baechle | 87353d8 | 2007-11-19 12:23:51 +0000 | [diff] [blame] | 95 | static void __init msmtc_prepare_cpus(unsigned int max_cpus) |
Ralf Baechle | 41c594a | 2006-04-05 09:45:45 +0100 | [diff] [blame] | 96 | { |
Kevin D. Kissell | 8531a35 | 2008-09-09 21:48:52 +0200 | [diff] [blame] | 97 | smtc_prepare_cpus(max_cpus); |
Ralf Baechle | 41c594a | 2006-04-05 09:45:45 +0100 | [diff] [blame] | 98 | } |
| 99 | |
/*
 * Platform SMP operations vector for SMTC on Malta.  Registered with
 * the generic MIPS SMP layer, which invokes these hooks at the
 * corresponding stages of SMP bringup and IPI delivery.
 */
struct plat_smp_ops msmtc_smp_ops = {
	.send_ipi_single = msmtc_send_ipi_single,
	.send_ipi_mask = msmtc_send_ipi_mask,
	.init_secondary = msmtc_init_secondary,
	.smp_finish = msmtc_smp_finish,
	.cpus_done = msmtc_cpus_done,
	.boot_secondary = msmtc_boot_secondary,
	.smp_setup = msmtc_smp_setup,
	.prepare_cpus = msmtc_prepare_cpus,
};
Kevin D. Kissell | f571eff | 2007-08-03 19:38:03 +0200 | [diff] [blame] | 110 | |
| 111 | #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF |
| 112 | /* |
| 113 | * IRQ affinity hook |
| 114 | */ |
| 115 | |
| 116 | |
/*
 * Filter a requested IRQ affinity mask down to CPUs that may legally
 * service the interrupt (VPE0 TCs that are online), record the result
 * in the irq descriptor, and apply any generic SMTC affinity setup.
 * Always returns 0 (success), even if the filtered mask ends up empty.
 */
int plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
{
	cpumask_t tmask;
	int cpu = 0;
	/* Forward declaration; the definition lives in the common SMTC code. */
	void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff);

	/*
	 * On the legacy Malta development board, all I/O interrupts
	 * are routed through the 8259 and combined in a single signal
	 * to the CPU daughterboard, and on the CoreFPGA2/3 34K models,
	 * that signal is brought to IP2 of both VPEs. To avoid racing
	 * concurrent interrupt service events, IP2 is enabled only on
	 * one VPE, by convention VPE0. So long as no bits are ever
	 * cleared in the affinity mask, there will never be any
	 * interrupt forwarding. But as soon as a program or operator
	 * sets affinity for one of the related IRQs, we need to make
	 * sure that we don't ever try to forward across the VPE boundary,
	 * at least not until we engineer a system where the interrupt
	 * _ack() or _end() function can somehow know that it corresponds
	 * to an interrupt taken on another VPE, and perform the appropriate
	 * restoration of Status.IM state using MFTR/MTTR instead of the
	 * normal local behavior. We also ensure that no attempt will
	 * be made to forward to an offline "CPU".
	 */

	cpumask_copy(&tmask, affinity);
	for_each_cpu(cpu, affinity) {
		/* Drop CPUs on non-zero VPEs and CPUs that are offline. */
		if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu))
			cpu_clear(cpu, tmask);
	}
	/* Publish the filtered mask as the IRQ's effective affinity. */
	cpumask_copy(irq_desc[irq].affinity, &tmask);

	if (cpus_empty(tmask))
		/*
		 * We could restore a default mask here, but the
		 * runtime code can anyway deal with the null set
		 */
		printk(KERN_WARNING
			"IRQ affinity leaves no legal CPU for IRQ %d\n", irq);

	/* Do any generic SMTC IRQ affinity setup */
	smtc_set_irq_affinity(irq, tmask);

	return 0;
}
| 162 | #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ |