blob: d2c1f1becd4feaa8a1a15af4e68ddb9fdc262aab [file] [log] [blame]
Jan Glauber779e6e12008-07-17 17:16:48 +02001/*
2 * linux/drivers/s390/cio/qdio_main.c
3 *
4 * Linux for s390 qdio support, buffer handling, qdio API and module support.
5 *
6 * Copyright 2000,2008 IBM Corp.
7 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
8 * Jan Glauber <jang@linux.vnet.ibm.com>
9 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
10 */
11#include <linux/module.h>
12#include <linux/init.h>
13#include <linux/kernel.h>
14#include <linux/timer.h>
15#include <linux/delay.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090016#include <linux/gfp.h>
Jan Glauber30d77c32011-01-05 12:47:29 +010017#include <linux/kernel_stat.h>
Arun Sharma60063492011-07-26 16:09:06 -070018#include <linux/atomic.h>
Jan Glauber779e6e12008-07-17 17:16:48 +020019#include <asm/debug.h>
20#include <asm/qdio.h>
21
22#include "cio.h"
23#include "css.h"
24#include "device.h"
25#include "qdio.h"
26#include "qdio_debug.h"
Jan Glauber779e6e12008-07-17 17:16:48 +020027
28MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
29 "Jan Glauber <jang@linux.vnet.ibm.com>");
30MODULE_DESCRIPTION("QDIO base support");
31MODULE_LICENSE("GPL");
32
/**
 * do_siga_sync - issue the SIGA-sync instruction
 * @schid: subchannel id or, in case of QEBSM, the subchannel token
 * @out_mask: which output queues to sync
 * @in_mask: which input queues to sync
 * @fc: function code to perform
 *
 * Returns the condition code of the SIGA instruction (0 on success).
 */
static inline int do_siga_sync(unsigned long schid,
			       unsigned int out_mask, unsigned int in_mask,
			       unsigned int fc)
{
	/* SIGA expects its operands in fixed hardware registers 0-3 */
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long out asm ("2") = out_mask;
	register unsigned long in asm ("3") = in_mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"	/* extract the condition code */
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
	return cc;
}
51
/**
 * do_siga_input - issue the SIGA-read instruction
 * @schid: subchannel id or, in case of QEBSM, the subchannel token
 * @mask: which input queues to process
 * @fc: function code to perform
 *
 * Returns the condition code of the SIGA instruction (0 on success).
 */
static inline int do_siga_input(unsigned long schid, unsigned int mask,
				unsigned int fc)
{
	/* SIGA expects its operands in fixed hardware registers 0-2 */
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long __mask asm ("2") = mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"	/* extract the condition code */
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
	return cc;
}
68
69/**
70 * do_siga_output - perform SIGA-w/wt function
71 * @schid: subchannel id or in case of QEBSM the subchannel token
72 * @mask: which output queues to process
73 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
74 * @fc: function code to perform
75 *
76 * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
77 * Note: For IQDC unicast queues only the highest priority queue is processed.
78 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 unsigned int *bb, unsigned int fc)
{
	/* SIGA expects its operands in fixed hardware registers 0-2 */
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	/* preset cc; kept if the instruction raises an access exception */
	int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;

	asm volatile(
		"	siga	0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		/* on an exception, jump past the cc extraction */
		EX_TABLE(0b, 1b)
		: "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
		: : "cc", "memory");
	/* the busy bit is returned in the leftmost bit of register 0 */
	*bb = ((unsigned int) __fc) >> 31;
	return cc;
}
98
99static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
100{
Jan Glauber779e6e12008-07-17 17:16:48 +0200101 /* all done or next buffer state different */
102 if (ccq == 0 || ccq == 32)
103 return 0;
104 /* not all buffers processed */
105 if (ccq == 96 || ccq == 97)
106 return 1;
107 /* notify devices immediately */
Jan Glauber22f99342008-12-25 13:38:46 +0100108 DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
Jan Glauber779e6e12008-07-17 17:16:48 +0200109 return -EIO;
110}
111
112/**
113 * qdio_do_eqbs - extract buffer states for QEBSM
114 * @q: queue to manipulate
115 * @state: state of the extracted buffers
116 * @start: buffer number to start at
117 * @count: count of buffers to examine
Jan Glauber50f769d2008-12-25 13:38:47 +0100118 * @auto_ack: automatically acknowledge buffers
Jan Glauber779e6e12008-07-17 17:16:48 +0200119 *
Coly Li73ac36e2009-01-07 18:09:16 -0800120 * Returns the number of successfully extracted equal buffer states.
Jan Glauber779e6e12008-07-17 17:16:48 +0200121 * Stops processing if a state is different from the last buffers state.
122 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	BUG_ON(!q->irq_ptr->sch_token);
	qperf_inc(q, eqbs);

	/* output queues are numbered after all input queues */
	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	/* do_eqbs updates tmp_start/tmp_count to reflect remaining work */
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
		      auto_ack);
	rc = qdio_check_ccq(q, ccq);

	/* At least one buffer was processed, return and extract the remaining
	 * buffers later.
	 */
	if ((ccq == 96) && (count != tmp_count)) {
		qperf_inc(q, eqbs_partial);
		return (count - tmp_count);
	}

	if (rc == 1) {
		/* nothing extracted yet, retry the instruction */
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
		goto again;
	}

	if (rc < 0) {
		/* unrecoverable: report a check condition to the driver */
		DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   q->nr, q->first_to_kick, count,
			   q->irq_ptr->int_parm);
		return 0;
	}
	return count - tmp_count;
}
165
166/**
167 * qdio_do_sqbs - set buffer states for QEBSM
168 * @q: queue to manipulate
169 * @state: new state of the buffers
170 * @start: first buffer number to change
171 * @count: how many buffers to change
172 *
173 * Returns the number of successfully changed buffers.
174 * Does retrying until the specified count of buffer states is set or an
175 * error occurs.
176 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	/* nothing to do */
	if (!count)
		return 0;

	BUG_ON(!q->irq_ptr->sch_token);
	qperf_inc(q, sqbs);

	/* output queues are numbered after all input queues */
	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	/* do_sqbs updates tmp_start/tmp_count to reflect remaining work */
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
	rc = qdio_check_ccq(q, ccq);
	if (rc == 1) {
		/* not all buffers changed yet, retry until done */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
		qperf_inc(q, sqbs_partial);
		goto again;
	}
	if (rc < 0) {
		/* unrecoverable: report a check condition to the driver */
		DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   q->nr, q->first_to_kick, count,
			   q->irq_ptr->int_parm);
		return 0;
	}
	WARN_ON(tmp_count);
	return count - tmp_count;
}
213
214/* returns number of examined buffers and their common state in *state */
215static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
Jan Glauber50f769d2008-12-25 13:38:47 +0100216 unsigned char *state, unsigned int count,
217 int auto_ack)
Jan Glauber779e6e12008-07-17 17:16:48 +0200218{
219 unsigned char __state = 0;
220 int i;
221
222 BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
223 BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
224
225 if (is_qebsm(q))
Jan Glauber50f769d2008-12-25 13:38:47 +0100226 return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
Jan Glauber779e6e12008-07-17 17:16:48 +0200227
228 for (i = 0; i < count; i++) {
229 if (!__state)
230 __state = q->slsb.val[bufnr];
231 else if (q->slsb.val[bufnr] != __state)
232 break;
233 bufnr = next_buf(bufnr);
234 }
235 *state = __state;
236 return i;
237}
238
/* read the state of a single buffer; see get_buf_states */
static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
				unsigned char *state, int auto_ack)
{
	return get_buf_states(q, bufnr, state, 1, auto_ack);
}
244
245/* wrap-around safe setting of slsb states, returns number of changed buffers */
246static inline int set_buf_states(struct qdio_q *q, int bufnr,
247 unsigned char state, int count)
248{
249 int i;
250
251 BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
252 BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
253
254 if (is_qebsm(q))
255 return qdio_do_sqbs(q, state, bufnr, count);
256
257 for (i = 0; i < count; i++) {
258 xchg(&q->slsb.val[bufnr], state);
259 bufnr = next_buf(bufnr);
260 }
261 return count;
262}
263
/* set the state of a single buffer; see set_buf_states */
static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	return set_buf_states(q, bufnr, state, 1);
}
269
/* set slsb states to initial state */
void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	/* reset every buffer of every queue to its not-initialized state */
	for_each_input_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
	for_each_output_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
}
283
/* issue SIGA-sync for the given queue masks; returns the condition code */
static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
				 unsigned int input)
{
	/* the first 32 bits of schid hold the subchannel id */
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_SYNC;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
	qperf_inc(q, siga_sync);

	/* with QEBSM the subchannel token replaces the subchannel id */
	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_sync(schid, output, input, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
	return cc;
}
304
Jan Glauber60b5df22009-06-22 12:08:10 +0200305static inline int qdio_siga_sync_q(struct qdio_q *q)
Jan Glauber779e6e12008-07-17 17:16:48 +0200306{
307 if (q->is_input_q)
308 return qdio_siga_sync(q, 0, q->mask);
309 else
310 return qdio_siga_sync(q, q->mask, 0);
311}
312
/*
 * Issue SIGA-write for an output queue. On a HiperSockets busy condition
 * the instruction is retried until QDIO_BUSY_BIT_PATIENCE has elapsed;
 * @busy_bit reflects the state of the last attempt.
 * Returns the condition code of the SIGA instruction.
 */
static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
{
	/* the first 32 bits of schid hold the subchannel id */
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_WRITE;
	u64 start_time = 0;
	int retries = 0, cc;

	/* with QEBSM the subchannel token replaces the subchannel id */
	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}
again:
	cc = do_siga_output(schid, q->mask, busy_bit, fc);

	/* hipersocket busy condition */
	if (unlikely(*busy_bit)) {
		/* the busy bit is only expected for HiperSockets with cc 2 */
		WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
		retries++;

		/* first hit: record when we started retrying */
		if (!start_time) {
			start_time = get_clock();
			goto again;
		}
		/* keep retrying until the patience interval has elapsed */
		if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}
	if (retries) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
			      "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
	}
	return cc;
}
346
/* issue SIGA-read for the input queue; returns the condition code */
static inline int qdio_siga_input(struct qdio_q *q)
{
	/* the first 32 bits of schid hold the subchannel id */
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_READ;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
	qperf_inc(q, siga_read);

	/* with QEBSM the subchannel token replaces the subchannel id */
	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_input(schid, q->mask, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
	return cc;
}
366
Jan Glauber90adac52011-01-05 12:47:54 +0100367#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
368#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)
369
/* bring the buffer states of the relevant queues up to date */
static inline void qdio_sync_queues(struct qdio_q *q)
{
	/* PCI capable outbound queues will also be scanned so sync them too */
	if (!pci_out_supported(q)) {
		qdio_siga_sync_q(q);
		return;
	}
	qdio_siga_sync_all(q);
}
378
/* debug helper: sync the queue if required, then read one buffer state */
int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
			unsigned char *state)
{
	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	return get_buf_states(q, bufnr, state, 1, 0);
}
386
387static inline void qdio_stop_polling(struct qdio_q *q)
Jan Glauber779e6e12008-07-17 17:16:48 +0200388{
Jan Glauber50f769d2008-12-25 13:38:47 +0100389 if (!q->u.in.polling)
Jan Glauber779e6e12008-07-17 17:16:48 +0200390 return;
Jan Glauber50f769d2008-12-25 13:38:47 +0100391
Jan Glauber779e6e12008-07-17 17:16:48 +0200392 q->u.in.polling = 0;
Jan Glauber6486cda2010-01-04 09:05:42 +0100393 qperf_inc(q, stop_polling);
Jan Glauber779e6e12008-07-17 17:16:48 +0200394
395 /* show the card that we are not polling anymore */
Jan Glauber50f769d2008-12-25 13:38:47 +0100396 if (is_qebsm(q)) {
Jan Glaubere85dea02009-03-26 15:24:29 +0100397 set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
Jan Glauber50f769d2008-12-25 13:38:47 +0100398 q->u.in.ack_count);
399 q->u.in.ack_count = 0;
400 } else
Jan Glaubere85dea02009-03-26 15:24:29 +0100401 set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
Jan Glauber779e6e12008-07-17 17:16:48 +0200402}
403
Jan Glauberd3072972010-02-26 22:37:36 +0100404static inline void account_sbals(struct qdio_q *q, int count)
405{
406 int pos = 0;
407
408 q->q_stats.nr_sbal_total += count;
409 if (count == QDIO_MAX_BUFFERS_MASK) {
410 q->q_stats.nr_sbals[7]++;
411 return;
412 }
413 while (count >>= 1)
414 pos++;
415 q->q_stats.nr_sbals[pos]++;
416}
417
/*
 * Handle buffers found in an error state: record debug data, flag the
 * error in q->qdio_error for the upper layer and reset the buffer states.
 */
static void process_buffer_error(struct qdio_q *q, int count)
{
	unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
					SLSB_P_OUTPUT_NOT_INIT;

	q->qdio_error |= QDIO_ERROR_SLSB_STATE;

	/* special handling for no target buffer empty */
	if ((!q->is_input_q &&
	    (q->sbal[q->first_to_check]->element[15].sflags) == 0x10)) {
		qperf_inc(q, target_full);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
			      q->first_to_check);
		return;
	}

	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
	DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
	DBF_ERROR("F14:%2x F15:%2x",
		  q->sbal[q->first_to_check]->element[14].sflags,
		  q->sbal[q->first_to_check]->element[15].sflags);

	/*
	 * Interrupts may be avoided as long as the error is present
	 * so change the buffer state immediately to avoid starvation.
	 */
	set_buf_states(q, q->first_to_check, state, count);
}
Jan Glauber779e6e12008-07-17 17:16:48 +0200447
/*
 * Acknowledge newly primed input buffers and reset the remaining ones so
 * that further interrupts are generated for them.
 */
static inline void inbound_primed(struct qdio_q *q, int count)
{
	int new;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count);

	/* for QEBSM the ACK was already set by EQBS */
	if (is_qebsm(q)) {
		if (!q->u.in.polling) {
			/* start polling with this batch as the current ACK */
			q->u.in.polling = 1;
			q->u.in.ack_count = count;
			q->u.in.ack_start = q->first_to_check;
			return;
		}

		/* delete the previous ACK's */
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = count;
		q->u.in.ack_start = q->first_to_check;
		return;
	}

	/*
	 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
	 * or by the next inbound run.
	 */
	new = add_buf(q->first_to_check, count - 1);
	if (q->u.in.polling) {
		/* reset the previous ACK but first set the new one */
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
	} else {
		q->u.in.polling = 1;
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
	}

	q->u.in.ack_start = new;
	count--;
	if (!count)
		return;
	/* need to change ALL buffers to get more interrupts */
	set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
}
492
/*
 * Scan the input queue for buffers that changed state, advance
 * first_to_check over them and return its new value.
 */
static int get_inbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state = 0;

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	if (q->first_to_check == stop)
		goto out;

	/*
	 * No siga sync here, as a PCI or we after a thin interrupt
	 * already sync'ed the queues.
	 */
	count = get_buf_states(q, q->first_to_check, &state, count, 1);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		/* new data arrived: acknowledge it and account the buffers */
		inbound_primed(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (atomic_sub(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		break;
	case SLSB_P_INPUT_ERROR:
		process_buffer_error(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_INPUT_EMPTY:
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		/* nothing new from the adapter */
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
		break;
	default:
		BUG();
	}
out:
	return q->first_to_check;
}
545
Jan Glauber60b5df22009-06-22 12:08:10 +0200546static int qdio_inbound_q_moved(struct qdio_q *q)
Jan Glauber779e6e12008-07-17 17:16:48 +0200547{
548 int bufnr;
549
550 bufnr = get_inbound_buffer_frontier(q);
551
Jan Glaubere85dea02009-03-26 15:24:29 +0100552 if ((bufnr != q->last_move) || q->qdio_error) {
553 q->last_move = bufnr;
Martin Schwidefsky27d71602010-02-26 22:37:38 +0100554 if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
Jan Glauber3a601bf2010-05-17 10:00:17 +0200555 q->u.in.timestamp = get_clock();
Jan Glauber779e6e12008-07-17 17:16:48 +0200556 return 1;
557 } else
558 return 0;
559}
560
/* returns 1 if no more work is expected on the input queue, otherwise 0 */
static inline int qdio_inbound_q_done(struct qdio_q *q)
{
	unsigned char state = 0;

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	get_buf_state(q, q->first_to_check, &state, 0);

	if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
		/* more work coming */
		return 0;

	if (is_thinint_irq(q->irq_ptr))
		return 1;

	/* don't poll under z/VM */
	if (MACHINE_IS_VM)
		return 1;

	/*
	 * At this point we know, that inbound first_to_check
	 * has (probably) not moved (see qdio_inbound_processing).
	 */
	if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
		/* polled long enough without progress, consider it done */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
			      q->first_to_check);
		return 1;
	} else
		return 0;
}
594
/* call the upper layer driver's handler for the buffers processed so far */
static void qdio_kick_handler(struct qdio_q *q)
{
	int start = q->first_to_kick;
	int end = q->first_to_check;
	int count;

	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	/* number of buffers between the last kick and the current frontier */
	count = sub_buf(end, start);

	if (q->is_input_q) {
		qperf_inc(q, inbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
	} else {
		qperf_inc(q, outbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
			      start, count);
	}

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
		   q->irq_ptr->int_parm);

	/* for the next time */
	q->first_to_kick = end;
	q->qdio_error = 0;
}
622
/* tasklet body: process the input queue and reschedule while work remains */
static void __qdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		/* means poll time is not yet over */
		qperf_inc(q, tasklet_inbound_resched);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
			tasklet_schedule(&q->tasklet);
			return;
		}
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}
652
/* tasklet entry point for input queue processing */
void qdio_inbound_processing(unsigned long data)
{
	__qdio_inbound_processing((struct qdio_q *)data);
}
658
/*
 * Scan the output queue for buffers the adapter has processed, advance
 * first_to_check over them and return its new value.
 */
static int get_outbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state = 0;

	/* sync by hand where the hardware does not keep the states current:
	 * non-IQD queues without outbound PCI support, and IQD multicast
	 * queues */
	if (need_siga_sync(q))
		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
		    !pci_out_supported(q)) ||
		    (queue_type(q) == QDIO_IQDIO_QFMT &&
		    multicast_outbound(q)))
			qdio_siga_sync_q(q);

	/*
	 * Don't check 128 buffers, as otherwise qdio_outbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	if (q->first_to_check == stop)
		return q->first_to_check;

	count = get_buf_states(q, q->first_to_check, &state, count, 0);
	if (!count)
		return q->first_to_check;

	switch (state) {
	case SLSB_P_OUTPUT_EMPTY:
		/* the adapter got it */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %02x", q->nr, count);

		atomic_sub(count, &q->nr_buf_used);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		break;
	case SLSB_P_OUTPUT_ERROR:
		process_buffer_error(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr);
		break;
	case SLSB_P_OUTPUT_NOT_INIT:
	case SLSB_P_OUTPUT_HALTED:
		break;
	default:
		BUG();
	}
	return q->first_to_check;
}
716
717/* all buffers processed? */
718static inline int qdio_outbound_q_done(struct qdio_q *q)
719{
720 return atomic_read(&q->nr_buf_used) == 0;
721}
722
723static inline int qdio_outbound_q_moved(struct qdio_q *q)
724{
725 int bufnr;
726
727 bufnr = get_outbound_buffer_frontier(q);
728
Jan Glaubere85dea02009-03-26 15:24:29 +0100729 if ((bufnr != q->last_move) || q->qdio_error) {
730 q->last_move = bufnr;
Jan Glauber22f99342008-12-25 13:38:46 +0100731 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
Jan Glauber779e6e12008-07-17 17:16:48 +0200732 return 1;
733 } else
734 return 0;
735}
736
/*
 * Signal the adapter to process the output queue. Retries the SIGA up to
 * QDIO_BUSY_BIT_RETRIES times when the busy bit is set.
 * Returns 0 on success or when no SIGA is required, otherwise the SIGA
 * condition code, possibly or'ed with QDIO_ERROR_SIGA_BUSY.
 */
static int qdio_kick_outbound_q(struct qdio_q *q)
{
	int retries = 0, cc;
	unsigned int busy_bit;

	if (!need_siga_out(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
retry:
	qperf_inc(q, siga_write);

	cc = qdio_siga_output(q, &busy_bit);
	switch (cc) {
	case 0:
		break;
	case 2:
		if (busy_bit) {
			/* target busy: back off and retry a limited number of times */
			while (++retries < QDIO_BUSY_BIT_RETRIES) {
				mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
				goto retry;
			}
			DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
			cc |= QDIO_ERROR_SIGA_BUSY;
		} else
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
		break;
	case 1:
	case 3:
		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
		break;
	}
	if (retries) {
		DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
		DBF_ERROR("count:%u", retries);
	}
	return cc;
}
775
/*
 * Core of the outbound tasklet: kick the handler for moved buffers and
 * decide whether to reschedule the tasklet, arm the flush timer, or stop.
 */
static void __qdio_outbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_outbound);
	BUG_ON(atomic_read(&q->nr_buf_used) < 0);

	if (qdio_outbound_q_moved(q))
		qdio_kick_handler(q);

	/* zfcp queues without PCI support must be polled until empty */
	if (queue_type(q) == QDIO_ZFCP_QFMT)
		if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
			goto sched;

	/* bail out for HiperSockets unicast queues */
	if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
		return;

	/* HiperSockets multicast: keep polling while many buffers are used */
	if ((queue_type(q) == QDIO_IQDIO_QFMT) &&
	    (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL)
		goto sched;

	/* with PCI interrupts enabled the adapter signals completion itself */
	if (q->u.out.pci_out_enabled)
		return;

	/*
	 * Now we know that queue type is either qeth without pci enabled
	 * or HiperSockets multicast. Make sure buffer switch from PRIMED to
	 * EMPTY is noticed and outbound_handler is called after some time.
	 */
	if (qdio_outbound_q_done(q))
		del_timer(&q->u.out.timer);
	else
		if (!timer_pending(&q->u.out.timer))
			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
	return;

sched:
	/* never reschedule once the device is being shut down */
	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}
816
/* outbound tasklet entry point; data is the struct qdio_q pointer */
void qdio_outbound_processing(unsigned long data)
{
	__qdio_outbound_processing((struct qdio_q *)data);
}
823
824void qdio_outbound_timer(unsigned long data)
825{
826 struct qdio_q *q = (struct qdio_q *)data;
Jan Glauberc38f9602009-03-26 15:24:26 +0100827
828 if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
829 return;
Jan Glauber779e6e12008-07-17 17:16:48 +0200830 tasklet_schedule(&q->tasklet);
831}
832
Jan Glauber60b5df22009-06-22 12:08:10 +0200833static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
Jan Glauber779e6e12008-07-17 17:16:48 +0200834{
835 struct qdio_q *out;
836 int i;
837
838 if (!pci_out_supported(q))
839 return;
840
841 for_each_output_queue(q->irq_ptr, out, i)
842 if (!qdio_outbound_q_done(out))
843 tasklet_schedule(&out->tasklet);
844}
845
/*
 * Core of the thin-interrupt inbound tasklet: sync the queue if the
 * hardware requires it, kick the handler for new buffers, and take care
 * not to lose initiative when stopping polling.
 */
static void __tiqdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);
	if (need_siga_sync(q) && need_siga_sync_after_ai(q))
		qdio_sync_queues(q);

	/*
	 * The interrupt could be caused by a PCI request. Check the
	 * PCI capable outbound queues.
	 */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	/* more work pending: reschedule instead of stopping polling */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
			tasklet_schedule(&q->tasklet);
			return;
		}
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}
882
/* thin-interrupt inbound tasklet entry point */
void tiqdio_inbound_processing(unsigned long data)
{
	__tiqdio_inbound_processing((struct qdio_q *)data);
}
888
/* Set the IRQ state and make the new value visible to all CPUs. */
static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

	irq_ptr->state = state;
	/* full barrier: state is tested from interrupt and wait paths */
	mb();
}
897
Jan Glauber22f99342008-12-25 13:38:46 +0100898static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
Jan Glauber779e6e12008-07-17 17:16:48 +0200899{
Jan Glauber779e6e12008-07-17 17:16:48 +0200900 if (irb->esw.esw0.erw.cons) {
Jan Glauber22f99342008-12-25 13:38:46 +0100901 DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
902 DBF_ERROR_HEX(irb, 64);
903 DBF_ERROR_HEX(irb->ecw, 64);
Jan Glauber779e6e12008-07-17 17:16:48 +0200904 }
905}
906
/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

	if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;

	for_each_input_queue(irq_ptr, q, i) {
		if (q->u.in.queue_start_poll) {
			/* skip if polling is enabled or already in work */
			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
				     &q->u.in.queue_irq_state)) {
				qperf_inc(q, int_discarded);
				continue;
			}
			/* hand the queue to the upper-layer poll callback */
			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
						 q->irq_ptr->int_parm);
		} else
			tasklet_schedule(&q->tasklet);
	}

	/*
	 * NOTE(review): q here is whatever the input-queue loop left behind
	 * (the last input queue). This relies on at least one input queue
	 * existing and on pci_out_supported() only using q->irq_ptr —
	 * confirm before changing the loop above.
	 */
	if (!pci_out_supported(q))
		return;

	for_each_output_queue(irq_ptr, q, i) {
		if (qdio_outbound_q_done(q))
			continue;
		/* some devices need an explicit sync before inspecting SLSB */
		if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
			qdio_siga_sync_q(q);
		tasklet_schedule(&q->tasklet);
	}
}
941
/*
 * Report an activate-check condition to the upper layer via the handler
 * of the first available queue, then stop the device.
 */
static void qdio_handle_activate_check(struct ccw_device *cdev,
				unsigned long intparm, int cstat, int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int count;

	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
	DBF_ERROR("intp :%lx", intparm);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	/* pick any queue just to reach its registered handler */
	if (irq_ptr->nr_input_qs) {
		q = irq_ptr->input_qs[0];
	} else if (irq_ptr->nr_output_qs) {
		q = irq_ptr->output_qs[0];
	} else {
		/* no queues at all - nothing to notify */
		dump_stack();
		goto no_handler;
	}

	/* number of buffers between last kick and current check position */
	count = sub_buf(q->first_to_check, q->first_to_kick);
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
		   q->nr, q->first_to_kick, count, irq_ptr->int_parm);
no_handler:
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
}
968
Jan Glauber779e6e12008-07-17 17:16:48 +0200969static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
970 int dstat)
971{
972 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
Jan Glauber779e6e12008-07-17 17:16:48 +0200973
Jan Glauber22f99342008-12-25 13:38:46 +0100974 DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
Jan Glauber4c575422009-06-12 10:26:28 +0200975
976 if (cstat)
977 goto error;
978 if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
979 goto error;
980 if (!(dstat & DEV_STAT_DEV_END))
981 goto error;
982 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
983 return;
984
985error:
986 DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
987 DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
988 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
Jan Glauber779e6e12008-07-17 17:16:48 +0200989}
990
/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int cstat, dstat;

	/* interrupt for a device we are not (or no longer) driving */
	if (!intparm || !irq_ptr) {
		DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
		return;
	}

	kstat_cpu(smp_processor_id()).irqs[IOINT_QDI]++;
	if (irq_ptr->perf_stat_enabled)
		irq_ptr->perf_stat.qdio_int++;

	/* cio passes error conditions as an ERR_PTR-encoded irb */
	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
			qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
			wake_up(&cdev->private->wait_q);
			return;
		default:
			WARN_ON(1);
			return;
		}
	}
	qdio_irq_check_sense(irq_ptr, irb);
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	/* dispatch on the qdio device state machine */
	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		/* answer to the establish ccw */
		qdio_establish_handle_irq(cdev, cstat, dstat);
		break;
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_int_handler_pci(irq_ptr);
			return;
		}
		if (cstat || dstat)
			qdio_handle_activate_check(cdev, intparm, cstat,
						   dstat);
		break;
	case QDIO_IRQ_STATE_STOPPED:
		break;
	default:
		WARN_ON(1);
	}
	wake_up(&cdev->private->wait_q);
}
1047
/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The results of the chsc are stored in the
 * specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
		       struct qdio_ssqd_desc *data)
{

	if (!cdev || !cdev->private)
		return -EINVAL;

	DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
	/* NULL irq_ptr: query by subchannel id only - see qdio_setup_get_ssqd */
	return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
1067
/*
 * Stop all queue processing: kill every queue tasklet and cancel the
 * pending outbound flush timers. Called with the device STOPPED so no
 * new tasklets get scheduled.
 */
static void qdio_shutdown_queues(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		tasklet_kill(&q->tasklet);

	for_each_output_queue(irq_ptr, q, i) {
		del_timer(&q->u.out.timer);
		tasklet_kill(&q->tasklet);
	}
}
1082
/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int rc;
	unsigned long flags;

	if (!irq_ptr)
		return -ENODEV;

	/* we sleep below (mutex, wait_event), so no irq context allowed */
	BUG_ON(irqs_disabled());
	DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);

	mutex_lock(&irq_ptr->setup_mutex);
	/*
	 * Subchannel was already shot down. We cannot prevent being called
	 * twice since cio may trigger a shutdown asynchronously.
	 */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		mutex_unlock(&irq_ptr->setup_mutex);
		return 0;
	}

	/*
	 * Indicate that the device is going down. Scheduling the queue
	 * tasklets is forbidden from here on.
	 */
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);

	tiqdio_remove_input_queues(irq_ptr);
	qdio_shutdown_queues(cdev);
	qdio_shutdown_debug_entries(irq_ptr, cdev);

	/* cleanup subchannel */
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
	else
		/* default behaviour is halt */
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
	if (rc) {
		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4d", rc);
		goto no_cleanup;
	}

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
	/* drop the lock while waiting for the halt/clear to complete */
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR,
		10 * HZ);
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

no_cleanup:
	qdio_shutdown_thinint(irq_ptr);

	/* restore interrupt handler */
	if ((void *)cdev->handler == (void *)qdio_int_handler)
		cdev->handler = irq_ptr->orig_handler;
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	if (rc)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);
1157
/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 */
int qdio_free(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;

	DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
	mutex_lock(&irq_ptr->setup_mutex);

	if (irq_ptr->debug_area != NULL) {
		debug_unregister(irq_ptr->debug_area);
		irq_ptr->debug_area = NULL;
	}
	/* detach from the device before releasing the memory */
	cdev->private->qdio_data = NULL;
	mutex_unlock(&irq_ptr->setup_mutex);

	qdio_release_memory(irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);
1183
/**
 * qdio_allocate - allocate qdio queues and associated data
 * @init_data: initialization data
 */
int qdio_allocate(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;

	DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);

	/* every requested queue direction needs a handler */
	if ((init_data->no_input_qs && !init_data->input_handler) ||
	    (init_data->no_output_qs && !init_data->output_handler))
		return -EINVAL;

	if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
	    (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
		return -EINVAL;

	if ((!init_data->input_sbal_addr_array) ||
	    (!init_data->output_sbal_addr_array))
		return -EINVAL;

	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr)
		goto out_err;

	mutex_init(&irq_ptr->setup_mutex);
	qdio_allocate_dbf(init_data, irq_ptr);

	/*
	 * Allocate a page for the chsc calls in qdio_establish.
	 * Must be pre-allocated since a zfcp recovery will call
	 * qdio_establish. In case of low memory and swap on a zfcp disk
	 * we may not be able to allocate memory otherwise.
	 */
	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr->chsc_page)
		goto out_rel;

	/* qdr is used in ccw1.cda which is u32 */
	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->qdr)
		goto out_rel;
	WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);

	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
			     init_data->no_output_qs))
		goto out_rel;

	init_data->cdev->private->qdio_data = irq_ptr;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	return 0;
out_rel:
	/* releases every partially set up resource, including irq_ptr */
	qdio_release_memory(irq_ptr);
out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(qdio_allocate);
1243
/**
 * qdio_establish - establish queues on a qdio subchannel
 * @init_data: initialization data
 */
int qdio_establish(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;
	struct ccw_device *cdev = init_data->cdev;
	unsigned long saveflags;
	int rc;

	DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_setup_irq(init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	/* establish q */
	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->equeue.count;
	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	if (rc) {
		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	/* the interrupt handler moves state to ESTABLISHED or ERR */
	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}

	qdio_setup_ssqd_info(irq_ptr);
	DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);

	/* qebsm is now setup if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr, cdev);
	qdio_setup_debug_entries(irq_ptr, cdev);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);
1318
/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 */
int qdio_activate(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr;
	int rc;
	unsigned long saveflags;

	DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	/* must have been established first */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->aqueue.count;
	irq_ptr->ccw.cda = 0;

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	if (rc) {
		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc)
		goto out;

	if (is_thinint_irq(irq_ptr))
		tiqdio_add_input_queues(irq_ptr);

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		rc = -EIO;
		break;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);
1383
1384static inline int buf_in_between(int bufnr, int start, int count)
1385{
1386 int end = add_buf(start, count);
1387
1388 if (end > start) {
1389 if (bufnr >= start && bufnr < end)
1390 return 1;
1391 else
1392 return 0;
1393 }
1394
1395 /* wrap-around case */
1396 if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
1397 (bufnr < end))
1398 return 1;
1399 else
1400 return 0;
1401}
1402
/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 */
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
			  int bufnr, int count)
{
	int used, diff;

	qperf_inc(q, inbound_call);

	if (!q->u.in.polling)
		goto set;

	/* protect against stop polling setting an ACK for an emptied slsb */
	if (count == QDIO_MAX_BUFFERS_PER_Q) {
		/* overwriting everything, just delete polling status */
		q->u.in.polling = 0;
		q->u.in.ack_count = 0;
		goto set;
	} else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
		if (is_qebsm(q)) {
			/* partial overwrite, just update ack_start */
			diff = add_buf(bufnr, count);
			diff = sub_buf(diff, q->u.in.ack_start);
			q->u.in.ack_count -= diff;
			if (q->u.in.ack_count <= 0) {
				q->u.in.polling = 0;
				q->u.in.ack_count = 0;
				goto set;
			}
			q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
		}
		else
			/* the only ACK will be deleted, so stop polling */
			q->u.in.polling = 0;
	}

set:
	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);

	/* used = buffer count before this call added the new ones */
	used = atomic_add_return(count, &q->nr_buf_used) - count;
	BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);

	/* no need to signal as long as the adapter had free buffers */
	if (used)
		return 0;

	if (need_siga_in(q))
		return qdio_siga_input(q);
	return 0;
}
1458
/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 */
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
			   int bufnr, int count)
{
	unsigned char state = 0;
	int used, rc = 0;

	qperf_inc(q, outbound_call);

	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
	used = atomic_add_return(count, &q->nr_buf_used);
	BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);

	if (used == QDIO_MAX_BUFFERS_PER_Q)
		qperf_inc(q, outbound_queue_full);

	/* remember whether the caller requested PCI completion interrupts */
	if (callflags & QDIO_FLAG_PCI_OUT) {
		q->u.out.pci_out_enabled = 1;
		qperf_inc(q, pci_request_int);
	} else
		q->u.out.pci_out_enabled = 0;

	if (queue_type(q) == QDIO_IQDIO_QFMT) {
		/* One SIGA-W per buffer required for unicast HiperSockets. */
		WARN_ON_ONCE(count > 1 && !multicast_outbound(q));

		rc = qdio_kick_outbound_q(q);
	} else if (need_siga_sync(q)) {
		rc = qdio_siga_sync_q(q);
	} else {
		/* try to fast requeue buffers */
		get_buf_state(q, prev_buf(bufnr), &state, 0);
		if (state != SLSB_CU_OUTPUT_PRIMED)
			rc = qdio_kick_outbound_q(q);
		else
			qperf_inc(q, fast_requeue);
	}

	/* in case of SIGA errors we must process the error immediately */
	if (used >= q->u.out.scan_threshold || rc)
		tasklet_schedule(&q->tasklet);
	else
		/* free the SBALs in case of no further traffic */
		if (!timer_pending(&q->u.out.timer))
			mod_timer(&q->u.out.timer, jiffies + HZ);
	return rc;
}
1512
/**
 * do_QDIO - process input or output buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @callflags: input or output and special flags from the program
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 */
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
	    int q_nr, unsigned int bufnr, unsigned int count)
{
	struct qdio_irq *irq_ptr;

	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
		return -EINVAL;

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr,
		      "do%02x b:%02x c:%02x", callflags, bufnr, count);

	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
		return -EBUSY;
	/* zero buffers is a valid no-op */
	if (!count)
		return 0;
	if (callflags & QDIO_FLAG_SYNC_INPUT)
		return handle_inbound(irq_ptr->input_qs[q_nr],
				      callflags, bufnr, count);
	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
		return handle_outbound(irq_ptr->output_qs[q_nr],
				       callflags, bufnr, count);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_QDIO);
1549
/**
 * qdio_start_irq - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 *
 * Return codes
 *   0 - success
 *   1 - irqs not started since new data is available
 */
int qdio_start_irq(struct ccw_device *cdev, int nr)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	WARN_ON(queue_irqs_enabled(q));

	/* clear our device state change indicator unless it is shared */
	if (!shared_ind(q->irq_ptr->dsci))
		xchg(q->irq_ptr->dsci, 0);

	qdio_stop_polling(q);
	clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);

	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!shared_ind(q->irq_ptr->dsci) && *q->irq_ptr->dsci)
		goto rescan;
	if (!qdio_inbound_q_done(q))
		goto rescan;
	return 0;

rescan:
	/* re-disable irqs; report 1 only if we won the race to do so */
	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
			     &q->u.in.queue_irq_state))
		return 0;
	else
		return 1;

}
EXPORT_SYMBOL(qdio_start_irq);
1595
/**
 * qdio_get_next_buffers - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 * @bufnr: first filled buffer number
 * @error: buffers are in error state
 *
 * Return codes
 *   < 0 - error
 *   = 0 - no new buffers found
 *   > 0 - number of processed buffers
 */
int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
			  int *error)
{
	struct qdio_q *q;
	int start, end;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];
	/* caller is expected to have interrupts disabled at this point */
	WARN_ON(queue_irqs_enabled(q));

	/*
	 * Cannot rely on automatic sync after interrupt since queues may
	 * also be examined without interrupt.
	 */
	if (need_siga_sync(q))
		qdio_sync_queues(q);

	/* check the PCI capable outbound queues. */
	qdio_check_outbound_after_thinint(q);

	/* no new inbound buffers arrived since the last call */
	if (!qdio_inbound_q_moved(q))
		return 0;

	/* Note: upper-layer MUST stop processing immediately here ... */
	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return -EIO;

	/* report the window [first_to_kick, first_to_check) to the caller */
	start = q->first_to_kick;
	end = q->first_to_check;
	*bufnr = start;
	*error = q->qdio_error;

	/* for the next time */
	q->first_to_kick = end;
	q->qdio_error = 0;
	/* sub_buf handles the modulo wrap of the circular buffer ring */
	return sub_buf(end, start);
}
EXPORT_SYMBOL(qdio_get_next_buffers);
1648
1649/**
1650 * qdio_stop_irq - disable interrupt processing for the device
1651 * @cdev: associated ccw_device for the qdio subchannel
1652 * @nr: input queue number
1653 *
1654 * Return codes
1655 * 0 - interrupts were already disabled
1656 * 1 - interrupts successfully disabled
1657 */
1658int qdio_stop_irq(struct ccw_device *cdev, int nr)
1659{
1660 struct qdio_q *q;
1661 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1662
1663 if (!irq_ptr)
1664 return -ENODEV;
1665 q = irq_ptr->input_qs[nr];
1666
1667 if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
1668 &q->u.in.queue_irq_state))
1669 return 0;
1670 else
1671 return 1;
1672}
1673EXPORT_SYMBOL(qdio_stop_irq);
1674
Jan Glauber779e6e12008-07-17 17:16:48 +02001675static int __init init_QDIO(void)
1676{
1677 int rc;
1678
Sebastian Ottaa5c8df2011-04-04 09:43:31 +02001679 rc = qdio_debug_init();
Jan Glauber779e6e12008-07-17 17:16:48 +02001680 if (rc)
1681 return rc;
Sebastian Ottaa5c8df2011-04-04 09:43:31 +02001682 rc = qdio_setup_init();
1683 if (rc)
1684 goto out_debug;
Jan Glauber779e6e12008-07-17 17:16:48 +02001685 rc = tiqdio_allocate_memory();
1686 if (rc)
1687 goto out_cache;
Jan Glauber779e6e12008-07-17 17:16:48 +02001688 rc = tiqdio_register_thinints();
1689 if (rc)
Sebastian Ottaa5c8df2011-04-04 09:43:31 +02001690 goto out_ti;
Jan Glauber779e6e12008-07-17 17:16:48 +02001691 return 0;
1692
Jan Glauber779e6e12008-07-17 17:16:48 +02001693out_ti:
1694 tiqdio_free_memory();
1695out_cache:
1696 qdio_setup_exit();
Sebastian Ottaa5c8df2011-04-04 09:43:31 +02001697out_debug:
1698 qdio_debug_exit();
Jan Glauber779e6e12008-07-17 17:16:48 +02001699 return rc;
1700}
1701
/* module unload: tear down in reverse order of init_QDIO() */
static void __exit exit_QDIO(void)
{
	tiqdio_unregister_thinints();
	tiqdio_free_memory();
	qdio_setup_exit();
	qdio_debug_exit();
}
1709
/* register module entry/exit points with the kernel module loader */
module_init(init_QDIO);
module_exit(exit_QDIO);