/*
 * Copyright 2016,2017 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_POWERPC_XIVE_H
#define _ASM_POWERPC_XIVE_H

#define XIVE_INVALID_VP	0xffffffff

#ifdef CONFIG_PPC_XIVE

/*
 * Thread Interrupt Management Area (TIMA)
 *
 * This is a global MMIO region divided into 4 pages of varying access
 * permissions, providing access to per-cpu interrupt management
 * functions. It always identifies the CPU doing the access based
 * on the PowerBus initiator ID, so we always access it via the
 * same offset regardless of where the code is executing.
 */
extern void __iomem *xive_tima;

/*
 * Offset in the TM area of our current execution level (provided by
 * the backend)
 */
extern u32 xive_tima_offset;
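
/*
 * Illustrative sketch only, not part of this header's API: every TIMA
 * access is "xive_tima + xive_tima_offset + register offset", whatever
 * CPU executes it. TM_CPPR is assumed to come from asm/xive-regs.h and
 * out_8() from asm/io.h; the function name is made up for this example.
 */
static inline void xive_example_open_cppr(void)
{
	/* A CPPR of 0xff lets interrupts of any priority through */
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
}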

/*
 * Per-irq data (irq_get_handler_data for normal IRQs); IPIs
 * have it stored in the xive_cpu structure. For normal interrupts
 * we also cache the current target CPU.
 *
 * This structure is set up by the backend for each interrupt.
 */
struct xive_irq_data {
	u64 flags;
	u64 eoi_page;		/* Physical address of the EOI/ESB page */
	void __iomem *eoi_mmio;	/* ... and its MMIO mapping */
	u64 trig_page;		/* Physical address of the trigger page, if any */
	void __iomem *trig_mmio;	/* ... and its MMIO mapping */
	u32 esb_shift;		/* log2 size of the ESB pages */
	int src_chip;		/* Source chip or XIVE_INVALID_CHIP_ID */

	/* Setup/used by frontend */
	int target;
	bool saved_p;
};
#define XIVE_IRQ_FLAG_STORE_EOI	0x01
#define XIVE_IRQ_FLAG_LSI	0x02
#define XIVE_IRQ_FLAG_SHIFT_BUG	0x04
#define XIVE_IRQ_FLAG_MASK_FW	0x08
#define XIVE_IRQ_FLAG_EOI_FW	0x10

#define XIVE_INVALID_CHIP_ID	-1

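/*
 * Illustrative sketch only, not part of this header's API: sources
 * that have a trigger page are (re)triggered with a simple MMIO store
 * to it. Assumes out_be64() from asm/io.h; the function name is made
 * up for this example.
 */
static inline bool xive_example_trigger(struct xive_irq_data *xd)
{
	/* Not every source has a trigger page */
	if (!xd->trig_mmio)
		return false;

	/* Any store to the trigger page fires the interrupt */
	out_be64(xd->trig_mmio, 0);
	return true;
}
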
/* A queue tracking structure in a CPU */
struct xive_q {
	__be32 *qpage;		/* Queue page (array of big-endian entries) */
	u32 msk;		/* Index mask (number of entries - 1) */
	u32 idx;		/* Next entry to read */
	u32 toggle;		/* Current generation/valid bit polarity */
	u64 eoi_phys;		/* Physical address of the queue EOI page */
	u32 esc_irq;		/* Escalation interrupt, if any */
	atomic_t count;
	atomic_t pending_count;
};
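
/*
 * Illustrative sketch only, not part of this header's API: peeking at
 * such a queue. It assumes the convention used by the core XIVE code:
 * the top bit of each (big-endian) entry is a generation bit that must
 * differ from 'toggle' for the entry to be valid, 'toggle' being
 * flipped each time 'idx' wraps around 'msk'.
 */
static inline u32 xive_example_peek_queue(struct xive_q *q)
{
	u32 cur;

	if (!q->qpage)
		return 0;
	cur = be32_to_cpu(q->qpage[q->idx]);

	/* Generation bit matches the current toggle: queue is empty */
	if ((cur >> 31) == q->toggle)
		return 0;

	/* The low 31 bits carry the interrupt number */
	return cur & 0x7fffffff;
}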

/*
 * "Magic" Event State Buffer (ESB) MMIO offsets.
 *
 * Each interrupt source has a 2-bit state machine called ESB
 * which can be controlled by MMIO. It's made of 2 bits, P and
 * Q. P indicates that an interrupt is pending (has been sent
 * to a queue and is waiting for an EOI). Q indicates that the
 * interrupt has been triggered while pending.
 *
 * This acts as a coalescing mechanism in order to guarantee
 * that a given interrupt only occurs at most once in a queue.
 *
 * When doing an EOI, the Q bit will indicate if the interrupt
 * needs to be re-triggered.
 *
 * The following offsets into the ESB MMIO allow reading or
 * manipulating the PQ bits. They must be used with an 8-byte
 * load instruction. They all return the previous state of the
 * interrupt (atomically).
 *
 * Additionally, some ESB pages support doing an EOI via a
 * store at offset 0 and some ESBs support doing a trigger via a
 * separate trigger page.
 */
#define XIVE_ESB_GET		0x800
#define XIVE_ESB_SET_PQ_00	0xc00
#define XIVE_ESB_SET_PQ_01	0xd00
#define XIVE_ESB_SET_PQ_10	0xe00
#define XIVE_ESB_SET_PQ_11	0xf00
#define XIVE_ESB_MASK		XIVE_ESB_SET_PQ_01

#define XIVE_ESB_VAL_P		0x2
#define XIVE_ESB_VAL_Q		0x1
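
/*
 * Illustrative sketch only, not part of this header's API: how the
 * flags and offsets above might combine for an EOI. It is modelled on
 * assumptions about the core driver: firmware EOI (XIVE_IRQ_FLAG_EOI_FW)
 * and the SHIFT_BUG errata are left out, and in_be64()/out_be64() are
 * assumed to come from asm/io.h. Returns true if the caller should
 * re-trigger the source (e.g. via its trigger page).
 */
static inline bool xive_example_source_eoi(struct xive_irq_data *xd)
{
	u64 eoi_val;

	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) {
		/* "Store EOI": a store of 0 to the EOI page does it all */
		out_be64(xd->eoi_mmio, 0);
		return false;
	}
	if (xd->flags & XIVE_IRQ_FLAG_LSI) {
		/* Level sources: a plain load performs the EOI cycle */
		in_be64(xd->eoi_mmio);
		return false;
	}
	/*
	 * Otherwise clear both P and Q; the previous Q bit tells us
	 * whether the interrupt fired again while it was pending.
	 */
	eoi_val = in_be64(xd->eoi_mmio + XIVE_ESB_SET_PQ_00);
	return eoi_val & XIVE_ESB_VAL_Q;
}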

/* Global enable flags for the XIVE support */
extern bool __xive_enabled;

static inline bool xive_enabled(void) { return __xive_enabled; }

extern bool xive_native_init(void);
extern void xive_smp_probe(void);
extern int xive_smp_prepare_cpu(unsigned int cpu);
extern void xive_smp_setup_cpu(void);
extern void xive_smp_disable_cpu(void);
extern void xive_kexec_teardown_cpu(int secondary);
extern void xive_shutdown(void);
extern void xive_flush_interrupt(void);

/* xmon hook */
extern void xmon_xive_do_dump(int cpu);

/* APIs used by KVM */
extern u32 xive_native_default_eq_shift(void);
extern u32 xive_native_alloc_vp_block(u32 max_vcpus);
extern void xive_native_free_vp_block(u32 vp_base);
extern int xive_native_populate_irq_data(u32 hw_irq,
					 struct xive_irq_data *data);
extern void xive_cleanup_irq_data(struct xive_irq_data *xd);
extern u32 xive_native_alloc_irq(void);
extern void xive_native_free_irq(u32 irq);
extern int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq);

extern int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
				       __be32 *qpage, u32 order, bool can_escalate);
extern void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio);

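/*
 * Illustrative sketch only, not part of this header's API: one way a
 * user such as KVM might combine the calls above to stand up a single
 * event queue. The use of __get_free_pages(), the GFP flags and the
 * handling of a queue shift smaller than PAGE_SHIFT are illustrative
 * assumptions, not requirements of the API.
 */
static inline int xive_example_setup_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
	u32 shift = xive_native_default_eq_shift();
	u32 alloc_order = (shift > PAGE_SHIFT) ? (shift - PAGE_SHIFT) : 0;
	__be32 *qpage;

	/* Allocate zeroed queue memory, 1 << shift bytes */
	qpage = (__be32 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, alloc_order);
	if (!qpage)
		return -ENOMEM;

	/* "order" is the log2 of the queue size in bytes; no escalation */
	return xive_native_configure_queue(vp_id, q, prio, qpage, shift, false);
}
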
extern bool __xive_irq_trigger(struct xive_irq_data *xd);
extern bool __xive_irq_retrigger(struct xive_irq_data *xd);
extern void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd);

extern bool is_xive_irq(struct irq_chip *chip);

#else

static inline bool xive_enabled(void) { return false; }

static inline bool xive_native_init(void) { return false; }
static inline void xive_smp_probe(void) { }
static inline int xive_smp_prepare_cpu(unsigned int cpu) { return -EINVAL; }
static inline void xive_smp_setup_cpu(void) { }
static inline void xive_smp_disable_cpu(void) { }
static inline void xive_kexec_teardown_cpu(int secondary) { }
static inline void xive_shutdown(void) { }
static inline void xive_flush_interrupt(void) { }

static inline u32 xive_native_alloc_vp_block(u32 max_vcpus) { return XIVE_INVALID_VP; }
static inline void xive_native_free_vp_block(u32 vp_base) { }

#endif

#endif /* _ASM_POWERPC_XIVE_H */