/*
 * Copyright IBM Corp. 2000, 2009
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Cornelia Huck <cornelia.huck@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/airq.h>
#include <asm/isc.h>

#include "cio.h"
#include "ioasm.h"
#include "qdio.h"
#include "qdio_debug.h"

/*
 * Restriction: only 63 iqdio subchannels can have their own indicator;
 * any subchannels beyond that share one indicator.
 */
#define TIQDIO_NR_NONSHARED_IND		63
#define TIQDIO_NR_INDICATORS		(TIQDIO_NR_NONSHARED_IND + 1)
#define TIQDIO_SHARED_IND		63

/* device state change indicators */
struct indicator_t {
	u32 ind;	/* u32 because of compare-and-swap performance */
	atomic_t count; /* use count, 0 or 1 for non-shared indicators */
};

/* list of thin interrupt input queues */
static LIST_HEAD(tiq_list);
static DEFINE_MUTEX(tiq_list_lock);

/* Adapter interrupt definitions */
static void tiqdio_thinint_handler(struct airq_struct *airq);

static struct airq_struct tiqdio_airq = {
	.handler = tiqdio_thinint_handler,
	.isc = QDIO_AIRQ_ISC,
};
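
/*
 * Note: a single adapter interrupt source serves every thinint qdio
 * device. tiqdio_airq is registered once (see
 * tiqdio_register_thinints()) on the QDIO_AIRQ_ISC interruption
 * subclass; its handler fans out to the affected queues via tiq_list.
 */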

static struct indicator_t *q_indicators;

u64 last_ai_time;

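/*
 * Indicator slots are handed out first-fit from the 63 dedicated
 * entries; once all of them are taken, further callers pile up on the
 * shared slot, whose use count tracks how many subchannels still
 * reference it.
 */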
/* returns addr for the device state change indicator */
static u32 *get_indicator(void)
{
	int i;

	for (i = 0; i < TIQDIO_NR_NONSHARED_IND; i++)
		if (!atomic_read(&q_indicators[i].count)) {
			atomic_set(&q_indicators[i].count, 1);
			return &q_indicators[i].ind;
		}

	/* use the shared indicator */
	atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count);
	return &q_indicators[TIQDIO_SHARED_IND].ind;
}

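/*
 * Release an indicator: the slot index is recovered from the address
 * by pointer arithmetic, and the use count is dropped. The shared
 * slot's count may well stay positive here while other subchannels
 * still reference it.
 */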
static void put_indicator(u32 *addr)
{
	int i;

	if (!addr)
		return;
	i = ((unsigned long)addr - (unsigned long)q_indicators) /
	    sizeof(struct indicator_t);
	atomic_dec(&q_indicators[i].count);
}

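/*
 * Publish the irq's first input queue on tiq_list and prime its dsci
 * with 0x80 (1 << 7), so that the next thinint scan treats the freshly
 * activated queue as "changed" without waiting for a real adapter
 * indication.
 */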
void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
{
	mutex_lock(&tiq_list_lock);
	list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list);
	mutex_unlock(&tiq_list_lock);
	xchg(irq_ptr->dsci, 1 << 7);
}

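/*
 * Unlink the queue from tiq_list. list_del_rcu() plus the trailing
 * synchronize_rcu() guarantee that tiqdio_thinint_handler(), which
 * walks the list under rcu_read_lock(), is done with the entry before
 * the caller may free the queue.
 */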
void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;

	q = irq_ptr->input_qs[0];
	/* if establish triggered an error, the queue was never added */
	if (!q || !q->entry.prev || !q->entry.next)
		return;

	mutex_lock(&tiq_list_lock);
	list_del_rcu(&q->entry);
	mutex_unlock(&tiq_list_lock);
	synchronize_rcu();
}

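/*
 * A dsci is effectively shared when the irq references the shared
 * indicator slot, or when several input queues of one device hang off
 * the same dsci; in either case a single queue must not clear it on
 * its own.
 */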
static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr)
{
	return irq_ptr->nr_input_qs > 1;
}

static inline int references_shared_dsci(struct qdio_irq *irq_ptr)
{
	return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
}

static inline int shared_ind(struct qdio_irq *irq_ptr)
{
	return references_shared_dsci(irq_ptr) ||
	       has_multiple_inq_on_dsci(irq_ptr);
}

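/*
 * clear_nonshared_ind() and test_nonshared_ind() deliberately leave
 * shared indicators untouched: only a dsci that is owned exclusively
 * may be reset or reported by these helpers.
 */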
void clear_nonshared_ind(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return;
	if (shared_ind(irq_ptr))
		return;
	xchg(irq_ptr->dsci, 0);
}

int test_nonshared_ind(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return 0;
	if (shared_ind(irq_ptr))
		return 0;
	if (*irq_ptr->dsci)
		return 1;
	else
		return 0;
}

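/*
 * Fetch-and-clear the shared indicator. The xchg() return value tells
 * the interrupt handler whether the shared dsci was set at all, i.e.
 * whether queues on the shared slot need to be scanned.
 */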
static inline u32 clear_shared_ind(void)
{
	if (!atomic_read(&q_indicators[TIQDIO_SHARED_IND].count))
		return 0;
	return xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
}

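/*
 * Per-queue dispatch: with queue_start_poll set, the device driver
 * polls the queue itself and the dsci clear is deferred until after
 * processing; otherwise the dsci is cleared here (unless shared) and
 * inbound processing runs from a tasklet so that one busy queue cannot
 * starve the other thinint queues.
 */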
static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq, q, i) {
		if (!references_shared_dsci(irq) &&
		    has_multiple_inq_on_dsci(irq))
			xchg(q->irq_ptr->dsci, 0);

		if (q->u.in.queue_start_poll) {
			/* skip if polling is enabled or already in work */
			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
					     &q->u.in.queue_irq_state)) {
				qperf_inc(q, int_discarded);
				continue;
			}

			/* avoid dsci clear here, done after processing */
			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
						 q->irq_ptr->int_parm);
		} else {
			if (!shared_ind(q->irq_ptr))
				xchg(q->irq_ptr->dsci, 0);

			/*
			 * Call inbound processing but not directly
			 * since that could starve other thinint queues.
			 */
			tasklet_schedule(&q->tasklet);
		}
	}
}

/**
 * tiqdio_thinint_handler - thin interrupt handler for qdio
 * @airq: pointer to adapter interrupt descriptor
 */
static void tiqdio_thinint_handler(struct airq_struct *airq)
{
	u32 si_used = clear_shared_ind();
	struct qdio_q *q;

	last_ai_time = S390_lowcore.int_clock;
	inc_irq_stat(IRQIO_QAI);

	/* protect tiq_list entries, only changed in activate or shutdown */
	rcu_read_lock();

	/* check for work on all inbound thinint queues */
	list_for_each_entry_rcu(q, &tiq_list, entry) {
		struct qdio_irq *irq;

		/* only process queues from changed sets */
		irq = q->irq_ptr;
		if (unlikely(references_shared_dsci(irq))) {
			if (!si_used)
				continue;
		} else if (!*irq->dsci)
			continue;

		tiqdio_call_inq_handlers(irq);

		qperf_inc(q, adapter_int);
	}
	rcu_read_unlock();
}

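/*
 * Register (or, with reset, detach) the summary and subchannel
 * indicator addresses with the channel subsystem via the CHSC "set
 * adapter device controls" command issued by chsc_sadc().
 */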
static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
{
	struct chsc_scssc_area *scssc = (void *)irq_ptr->chsc_page;
	u64 summary_indicator_addr, subchannel_indicator_addr;
	int rc;

	if (reset) {
		summary_indicator_addr = 0;
		subchannel_indicator_addr = 0;
	} else {
		summary_indicator_addr = virt_to_phys(tiqdio_airq.lsi_ptr);
		subchannel_indicator_addr = virt_to_phys(irq_ptr->dsci);
	}

	rc = chsc_sadc(irq_ptr->schid, scssc, summary_indicator_addr,
		       subchannel_indicator_addr);
	if (rc) {
		DBF_ERROR("%4x SSI r:%4x", irq_ptr->schid.sch_no,
			  scssc->response.code);
		goto out;
	}

	DBF_EVENT("setscind");
	DBF_HEX(&summary_indicator_addr, sizeof(summary_indicator_addr));
	DBF_HEX(&subchannel_indicator_addr, sizeof(subchannel_indicator_addr));
out:
	return rc;
}

/* allocate non-shared indicators and shared indicator */
int __init tiqdio_allocate_memory(void)
{
	q_indicators = kcalloc(TIQDIO_NR_INDICATORS,
			       sizeof(struct indicator_t), GFP_KERNEL);
	if (!q_indicators)
		return -ENOMEM;
	return 0;
}

void tiqdio_free_memory(void)
{
	kfree(q_indicators);
}

int __init tiqdio_register_thinints(void)
{
	int rc;

	rc = register_adapter_interrupt(&tiqdio_airq);
	if (rc) {
		DBF_EVENT("RTI:%x", rc);
		return rc;
	}
	return 0;
}

int qdio_establish_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return 0;
	return set_subchannel_ind(irq_ptr, 0);
}

void qdio_setup_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return;
	irq_ptr->dsci = get_indicator();
	DBF_HEX(&irq_ptr->dsci, sizeof(void *));
}

void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return;

	/* reset adapter interrupt indicators */
	set_subchannel_ind(irq_ptr, 1);
	put_indicator(irq_ptr->dsci);
}

void __exit tiqdio_unregister_thinints(void)
{
	WARN_ON(!list_empty(&tiq_list));
	unregister_adapter_interrupt(&tiqdio_airq);
}