blob: bb1c09f7a76ced4cd028d631d79a13d0171e8c2c [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001#ifndef __ASM_IPI_H
2#define __ASM_IPI_H
3
4/*
5 * Copyright 2004 James Cleverdon, IBM.
6 * Subject to the GNU Public License, v.2
7 *
8 * Generic APIC InterProcessor Interrupt code.
9 *
10 * Moved to include file by James Cleverdon from
11 * arch/x86-64/kernel/smp.c
12 *
13 * Copyrights from kernel/smp.c:
14 *
15 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
16 * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
17 * (c) 2002,2003 Andi Kleen, SuSE Labs.
18 * Subject to the GNU Public License, v.2
19 */
20
Linus Torvalds1da177e2005-04-16 15:20:36 -070021#include <asm/hw_irq.h>
Jan Beulich00f1ea62007-05-02 19:27:04 +020022#include <asm/apic.h>
Paul Jacksone3f8ba82008-05-14 08:15:04 -070023#include <asm/smp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070024
25/*
26 * the following functions deal with sending IPIs between CPUs.
27 *
28 * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
29 */
30
Joe Perches061b3d92008-03-23 01:02:27 -070031static inline unsigned int __prepare_ICR(unsigned int shortcut, int vector,
32 unsigned int dest)
Linus Torvalds1da177e2005-04-16 15:20:36 -070033{
Jan Beulich1a426cb2005-09-12 18:49:24 +020034 unsigned int icr = shortcut | dest;
35
36 switch (vector) {
37 default:
38 icr |= APIC_DM_FIXED | vector;
39 break;
40 case NMI_VECTOR:
Jan Beulich1a426cb2005-09-12 18:49:24 +020041 icr |= APIC_DM_NMI;
42 break;
43 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070044 return icr;
45}
46
/*
 * Build the high ICR word (ICR2): the destination APIC ID lives in
 * its top byte.
 */
static inline int __prepare_ICR2(unsigned int mask)
{
	int icr2 = SET_APIC_DEST_FIELD(mask);

	return icr2;
}
51
Joe Perches061b3d92008-03-23 01:02:27 -070052static inline void __send_IPI_shortcut(unsigned int shortcut, int vector,
53 unsigned int dest)
Linus Torvalds1da177e2005-04-16 15:20:36 -070054{
55 /*
56 * Subtle. In the case of the 'never do double writes' workaround
57 * we have to lock out interrupts to be safe. As we don't care
58 * of the value read we use an atomic rmw access to avoid costly
59 * cli/sti. Otherwise we use an even cheaper single atomic write
60 * to the APIC.
61 */
62 unsigned int cfg;
63
64 /*
65 * Wait for idle.
66 */
67 apic_wait_icr_idle();
68
69 /*
70 * No need to touch the target chip field
71 */
72 cfg = __prepare_ICR(shortcut, vector, dest);
73
74 /*
75 * Send the IPI. The write to APIC_ICR fires this off.
76 */
Andi Kleeneddfb4e2005-09-12 18:49:23 +020077 apic_write(APIC_ICR, cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -070078}
79
Fernando Luis [** ISO-8859-1 charset **] VázquezCao9062d882007-05-02 19:27:18 +020080/*
81 * This is used to send an IPI with no shorthand notation (the destination is
82 * specified in bits 56 to 63 of the ICR).
83 */
Joe Perches061b3d92008-03-23 01:02:27 -070084static inline void __send_IPI_dest_field(unsigned int mask, int vector,
85 unsigned int dest)
Fernando Luis [** ISO-8859-1 charset **] VázquezCao9062d882007-05-02 19:27:18 +020086{
87 unsigned long cfg;
88
89 /*
90 * Wait for idle.
91 */
Fernando Luis [** ISO-8859-1 charset **] VázquezCao70ae77f2007-05-02 19:27:18 +020092 if (unlikely(vector == NMI_VECTOR))
93 safe_apic_wait_icr_idle();
94 else
95 apic_wait_icr_idle();
Fernando Luis [** ISO-8859-1 charset **] VázquezCao9062d882007-05-02 19:27:18 +020096
97 /*
98 * prepare target chip field
99 */
100 cfg = __prepare_ICR2(mask);
101 apic_write(APIC_ICR2, cfg);
102
103 /*
104 * program the ICR
105 */
106 cfg = __prepare_ICR(0, vector, dest);
107
108 /*
109 * Send the IPI. The write to APIC_ICR fires this off.
110 */
111 apic_write(APIC_ICR, cfg);
112}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700113
114static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
115{
Fernando Luis [** ISO-8859-1 charset **] VázquezCao9062d882007-05-02 19:27:18 +0200116 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700117 unsigned long query_cpu;
118
119 /*
120 * Hack. The clustered APIC addressing mode doesn't allow us to send
121 * to an arbitrary mask, so I do a unicast to each CPU instead.
122 * - mbligh
123 */
124 local_irq_save(flags);
Mike Travis334ef7a2008-05-12 21:21:13 +0200125 for_each_cpu_mask_nr(query_cpu, mask) {
Mike Travis71fff5e2007-10-19 20:35:03 +0200126 __send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu),
Fernando Luis [** ISO-8859-1 charset **] VázquezCao9062d882007-05-02 19:27:18 +0200127 vector, APIC_DEST_PHYSICAL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700128 }
129 local_irq_restore(flags);
130}
131
132#endif /* __ASM_IPI_H */