/*
 * linux/drivers/s390/cio/qdio_main.c
 *
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright 2000,2008 IBM Corp.
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
 */
11#include <linux/module.h>
12#include <linux/init.h>
13#include <linux/kernel.h>
14#include <linux/timer.h>
15#include <linux/delay.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090016#include <linux/gfp.h>
frank.blaschka@de.ibm.com104ea552011-08-08 01:33:55 +000017#include <linux/io.h>
Arun Sharma600634972011-07-26 16:09:06 -070018#include <linux/atomic.h>
Jan Glauber779e6e12008-07-17 17:16:48 +020019#include <asm/debug.h>
20#include <asm/qdio.h>
Michael Holzheu3ab121a2012-03-11 11:59:32 -040021#include <asm/ipl.h>
Jan Glauber779e6e12008-07-17 17:16:48 +020022
23#include "cio.h"
24#include "css.h"
25#include "device.h"
26#include "qdio.h"
27#include "qdio_debug.h"
Jan Glauber779e6e12008-07-17 17:16:48 +020028
29MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
30 "Jan Glauber <jang@linux.vnet.ibm.com>");
31MODULE_DESCRIPTION("QDIO base support");
32MODULE_LICENSE("GPL");
33
/*
 * do_siga_sync - issue SIGA-sync for the given subchannel.
 * @schid: subchannel id, or the subchannel token when QEBSM is active
 * @out_mask: bitmask of output queues to sync
 * @in_mask: bitmask of input queues to sync
 * @fc: SIGA function code
 *
 * Returns the instruction's condition code (0 on success).
 * The register bindings (r0=fc, r1=schid, r2=out, r3=in) are fixed by the
 * SIGA hardware interface and must not be changed.
 */
static inline int do_siga_sync(unsigned long schid,
			       unsigned int out_mask, unsigned int in_mask,
			       unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long out asm ("2") = out_mask;
	register unsigned long in asm ("3") = in_mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"	/* extract the condition code ... */
		"	srl	%0,28\n"	/* ... into the low bits of cc */
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
	return cc;
}
52
/*
 * do_siga_input - issue SIGA-read for the given subchannel.
 * @schid: subchannel id, or the subchannel token when QEBSM is active
 * @mask: bitmask of input queues to process
 * @fc: SIGA function code
 *
 * Returns the instruction's condition code (0 on success).
 * Register bindings (r0=fc, r1=schid, r2=mask) are fixed by the hardware.
 */
static inline int do_siga_input(unsigned long schid, unsigned int mask,
				unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long __mask asm ("2") = mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"	/* extract the condition code ... */
		"	srl	%0,28\n"	/* ... into the low bits of cc */
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc");
	return cc;
}
69
/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 * @aob: asynchronous operation block address (0 when unused)
 *
 * Returns condition code.
 * Note: For IQDC unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 unsigned int *bb, unsigned int fc,
				 unsigned long aob)
{
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	register unsigned long __aob asm("3") = aob;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc), "+d" (__fc), "+d" (__aob)
		: "d" (__schid), "d" (__mask)
		: "cc");
	/* the instruction returns the busy indication in bit 0 of r0 */
	*bb = __fc >> 31;
	return cc;
}
100
101static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
102{
Jan Glauber779e6e12008-07-17 17:16:48 +0200103 /* all done or next buffer state different */
104 if (ccq == 0 || ccq == 32)
105 return 0;
Jan Glauber25f269f2011-10-30 15:17:06 +0100106 /* no buffer processed */
107 if (ccq == 97)
Jan Glauber779e6e12008-07-17 17:16:48 +0200108 return 1;
Jan Glauber25f269f2011-10-30 15:17:06 +0100109 /* not all buffers processed */
110 if (ccq == 96)
111 return 2;
Jan Glauber779e6e12008-07-17 17:16:48 +0200112 /* notify devices immediately */
Jan Glauber22f99342008-12-25 13:38:46 +0100113 DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
Jan Glauber779e6e12008-07-17 17:16:48 +0200114 return -EIO;
115}
116
/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffers state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
	unsigned int ccq = 0;

	BUG_ON(!q->irq_ptr->sch_token);
	qperf_inc(q, eqbs);

	/* in the QEBSM numbering output queues follow all input queues */
	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
		      auto_ack);
	rc = qdio_check_ccq(q, ccq);
	if (!rc)
		return count - tmp_count;

	/* rc == 1: nothing extracted yet, just retry */
	if (rc == 1) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
		goto again;
	}

	if (rc == 2) {
		BUG_ON(tmp_count == count);
		qperf_inc(q, eqbs_partial);
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
			tmp_count);
		/*
		 * Retry once, if that fails bail out and process the
		 * extracted buffers before trying again.
		 */
		if (!retried++)
			goto again;
		else
			return count - tmp_count;
	}

	/* fatal error: report to the upper layer and return nothing */
	DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
	DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE,
		   q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
	return 0;
}
172
/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Does retrying until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	if (!count)
		return 0;

	BUG_ON(!q->irq_ptr->sch_token);
	qperf_inc(q, sqbs);

	/* in the QEBSM numbering output queues follow all input queues */
	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
	rc = qdio_check_ccq(q, ccq);
	if (!rc) {
		/* success implies all requested buffers were changed */
		WARN_ON(tmp_count);
		return count - tmp_count;
	}

	/* partial or no progress: keep retrying until done or fatal */
	if (rc == 1 || rc == 2) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
		qperf_inc(q, sqbs_partial);
		goto again;
	}

	/* fatal error: report to the upper layer and return nothing */
	DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
	DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE,
		   q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
	return 0;
}
220
/*
 * Returns the number of examined buffers and their common state in *state.
 * Walks the SLSB starting at @bufnr (wrapping at the queue end) and stops
 * at the first buffer whose state differs from the first one seen.  With
 * @merge_pending, PENDING output buffers are treated as EMPTY so both
 * states can be reported as one contiguous run.
 */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
				 unsigned char *state, unsigned int count,
				 int auto_ack, int merge_pending)
{
	unsigned char __state = 0;
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	/* hardware-assisted path */
	if (is_qebsm(q))
		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

	for (i = 0; i < count; i++) {
		if (!__state) {
			/* first buffer determines the run's state */
			__state = q->slsb.val[bufnr];
			if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
				__state = SLSB_P_OUTPUT_EMPTY;
		} else if (merge_pending) {
			if ((q->slsb.val[bufnr] & __state) != __state)
				break;
		} else if (q->slsb.val[bufnr] != __state)
			break;
		bufnr = next_buf(bufnr);
	}
	*state = __state;
	return i;
}
250
/* convenience wrapper: extract the state of a single buffer */
static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
				unsigned char *state, int auto_ack)
{
	return get_buf_states(q, bufnr, state, 1, auto_ack, 0);
}
256
257/* wrap-around safe setting of slsb states, returns number of changed buffers */
258static inline int set_buf_states(struct qdio_q *q, int bufnr,
259 unsigned char state, int count)
260{
261 int i;
262
263 BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
264 BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
265
266 if (is_qebsm(q))
267 return qdio_do_sqbs(q, state, bufnr, count);
268
269 for (i = 0; i < count; i++) {
270 xchg(&q->slsb.val[bufnr], state);
271 bufnr = next_buf(bufnr);
272 }
273 return count;
274}
275
/* convenience wrapper: set the state of a single buffer */
static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	return set_buf_states(q, bufnr, state, 1);
}
281
/* set slsb states to initial state (NOT_INIT) for all queues of @irq_ptr */
static void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
	for_each_output_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
}
295
/*
 * Synchronize the queue's SLSB with the adapter via SIGA-sync.
 * @output/@input are bitmasks selecting which queues to sync.
 * Returns 0 on success, -EIO if the instruction reported an error.
 */
static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
				 unsigned int input)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_SYNC;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
	qperf_inc(q, siga_sync);

	/* with QEBSM the subchannel token replaces the schid */
	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_sync(schid, output, input, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
	return (cc) ? -EIO : 0;
}
316
Jan Glauber60b5df22009-06-22 12:08:10 +0200317static inline int qdio_siga_sync_q(struct qdio_q *q)
Jan Glauber779e6e12008-07-17 17:16:48 +0200318{
319 if (q->is_input_q)
320 return qdio_siga_sync(q, 0, q->mask);
321 else
322 return qdio_siga_sync(q, q->mask, 0);
323}
324
/*
 * Issue SIGA-write (or SIGA-writeq when a completion-queue AOB is used)
 * for the queue.  On the hipersocket busy condition the instruction is
 * retried for up to QDIO_BUSY_BIT_PATIENCE before giving up.
 * Returns the condition code of the last attempt.
 */
static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
			    unsigned long aob)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_WRITE;
	u64 start_time = 0;
	int retries = 0, cc;
	unsigned long laob = 0;

	/* asynchronous completion requested: switch to SIGA-writeq */
	if (q->u.out.use_cq && aob != 0) {
		fc = QDIO_SIGA_WRITEQ;
		laob = aob;
	}

	/* with QEBSM the subchannel token replaces the schid */
	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}
again:
	WARN_ON_ONCE((aob && queue_type(q) != QDIO_IQDIO_QFMT) ||
		(aob && fc != QDIO_SIGA_WRITEQ));
	cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);

	/* hipersocket busy condition */
	if (unlikely(*busy_bit)) {
		WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
		retries++;

		/* start the patience timer on the first busy indication */
		if (!start_time) {
			start_time = get_clock();
			goto again;
		}
		if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}
	if (retries) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
			      "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
	}
	return cc;
}
367
/*
 * Issue SIGA-read for the input queue to tell the adapter that the
 * processed buffers are available again.
 * Returns 0 on success, -EIO if the instruction reported an error.
 */
static inline int qdio_siga_input(struct qdio_q *q)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_READ;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
	qperf_inc(q, siga_read);

	/* with QEBSM the subchannel token replaces the schid */
	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_input(schid, q->mask, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
	return (cc) ? -EIO : 0;
}
387
/* sync all output queues / all queues of the device, respectively */
#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)

/* sync whatever the following queue scan will look at */
static inline void qdio_sync_queues(struct qdio_q *q)
{
	/* PCI capable outbound queues will also be scanned so sync them too */
	if (pci_out_supported(q))
		qdio_siga_sync_all(q);
	else
		qdio_siga_sync_q(q);
}
399
/* debug helper: fetch one buffer state, syncing with the adapter first */
int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
			unsigned char *state)
{
	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	return get_buf_states(q, bufnr, state, 1, 0, 0);
}
407
/* leave polling mode: withdraw the ACK state(s) advertised to the card */
static inline void qdio_stop_polling(struct qdio_q *q)
{
	if (!q->u.in.polling)
		return;

	q->u.in.polling = 0;
	qperf_inc(q, stop_polling);

	/* show the card that we are not polling anymore */
	if (is_qebsm(q)) {
		/* QEBSM may have acknowledged a whole run of buffers */
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = 0;
	} else
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
}
424
Jan Glauberd3072972010-02-26 22:37:36 +0100425static inline void account_sbals(struct qdio_q *q, int count)
426{
427 int pos = 0;
428
429 q->q_stats.nr_sbal_total += count;
430 if (count == QDIO_MAX_BUFFERS_MASK) {
431 q->q_stats.nr_sbals[7]++;
432 return;
433 }
434 while (count >>= 1)
435 pos++;
436 q->q_stats.nr_sbals[pos]++;
437}
438
/*
 * Handle a run of @count buffers found in the ERROR state: record the
 * error for the upper-layer handler and reset the buffers to NOT_INIT.
 */
static void process_buffer_error(struct qdio_q *q, int count)
{
	unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
					SLSB_P_OUTPUT_NOT_INIT;

	q->qdio_error = QDIO_ERROR_SLSB_STATE;

	/* special handling for no target buffer empty */
	if ((!q->is_input_q &&
	    (q->sbal[q->first_to_check]->element[15].sflags) == 0x10)) {
		/* expected condition, not worth an error log */
		qperf_inc(q, target_full);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
			      q->first_to_check);
		goto set;
	}

	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
	DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
	DBF_ERROR("F14:%2x F15:%2x",
		  q->sbal[q->first_to_check]->element[14].sflags,
		  q->sbal[q->first_to_check]->element[15].sflags);

set:
	/*
	 * Interrupts may be avoided as long as the error is present
	 * so change the buffer state immediately to avoid starvation.
	 */
	set_buf_states(q, q->first_to_check, state, count);
}
Jan Glauber779e6e12008-07-17 17:16:48 +0200469
/*
 * Acknowledge a run of @count PRIMED input buffers.  Maintains the
 * single sliding ACK (or, with QEBSM, the ACK run set by EQBS) that
 * tells the card we are polling.
 */
static inline void inbound_primed(struct qdio_q *q, int count)
{
	int new;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count);

	/* for QEBSM the ACK was already set by EQBS */
	if (is_qebsm(q)) {
		if (!q->u.in.polling) {
			q->u.in.polling = 1;
			q->u.in.ack_count = count;
			q->u.in.ack_start = q->first_to_check;
			return;
		}

		/* delete the previous ACK's */
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = count;
		q->u.in.ack_start = q->first_to_check;
		return;
	}

	/*
	 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
	 * or by the next inbound run.
	 */
	new = add_buf(q->first_to_check, count - 1);
	if (q->u.in.polling) {
		/* reset the previous ACK but first set the new one */
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
	} else {
		q->u.in.polling = 1;
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
	}

	q->u.in.ack_start = new;
	count--;
	if (!count)
		return;
	/* need to change ALL buffers to get more interrupts */
	set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
}
514
/*
 * Scan the input queue from first_to_check, consume the run of equal
 * buffer states found there and return the new first_to_check frontier.
 */
static int get_inbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state = 0;

	q->timestamp = get_clock();

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	if (q->first_to_check == stop)
		goto out;

	/*
	 * No siga sync here, as a PCI or we after a thin interrupt
	 * already sync'ed the queues.
	 */
	count = get_buf_states(q, q->first_to_check, &state, count, 1, 0);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		/* new data from the card: ACK it and advance */
		inbound_primed(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (atomic_sub(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		break;
	case SLSB_P_INPUT_ERROR:
		process_buffer_error(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_INPUT_EMPTY:
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		/* nothing new: frontier does not move */
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
		break;
	default:
		BUG();
	}
out:
	return q->first_to_check;
}
569
Jan Glauber60b5df22009-06-22 12:08:10 +0200570static int qdio_inbound_q_moved(struct qdio_q *q)
Jan Glauber779e6e12008-07-17 17:16:48 +0200571{
572 int bufnr;
573
574 bufnr = get_inbound_buffer_frontier(q);
575
Jan Glauber1549d132012-05-09 16:27:34 +0200576 if (bufnr != q->last_move) {
Jan Glaubere85dea02009-03-26 15:24:29 +0100577 q->last_move = bufnr;
Martin Schwidefsky27d71602010-02-26 22:37:38 +0100578 if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
Jan Glauber3a601bf2010-05-17 10:00:17 +0200579 q->u.in.timestamp = get_clock();
Jan Glauber779e6e12008-07-17 17:16:48 +0200580 return 1;
581 } else
582 return 0;
583}
584
/*
 * Decide whether inbound processing is finished (1) or should keep
 * polling (0).  Under LPAR without thin interrupts a poll-time window
 * (QDIO_INPUT_THRESHOLD) is applied before giving up the initiative.
 */
static inline int qdio_inbound_q_done(struct qdio_q *q)
{
	unsigned char state = 0;

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	get_buf_state(q, q->first_to_check, &state, 0);

	if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
		/* more work coming */
		return 0;

	/* thin interrupts will re-kick us, no need to poll */
	if (is_thinint_irq(q->irq_ptr))
		return 1;

	/* don't poll under z/VM */
	if (MACHINE_IS_VM)
		return 1;

	/*
	 * At this point we know, that inbound first_to_check
	 * has (probably) not moved (see qdio_inbound_processing).
	 */
	if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
			      q->first_to_check);
		return 1;
	} else
		return 0;
}
618
frank.blaschka@de.ibm.com104ea552011-08-08 01:33:55 +0000619static inline int contains_aobs(struct qdio_q *q)
620{
621 return !q->is_input_q && q->u.out.use_cq;
622}
623
/* dump all fields of an asynchronous operation block to the debug feature */
static inline void qdio_trace_aob(struct qdio_irq *irq, struct qdio_q *q,
				  int i, struct qaob *aob)
{
	int tmp;

	DBF_DEV_EVENT(DBF_INFO, irq, "AOB%d:%lx", i,
		      (unsigned long) virt_to_phys(aob));
	DBF_DEV_EVENT(DBF_INFO, irq, "RES00:%lx",
		      (unsigned long) aob->res0[0]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES01:%lx",
		      (unsigned long) aob->res0[1]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES02:%lx",
		      (unsigned long) aob->res0[2]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES03:%lx",
		      (unsigned long) aob->res0[3]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES04:%lx",
		      (unsigned long) aob->res0[4]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES05:%lx",
		      (unsigned long) aob->res0[5]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES1:%x", aob->res1);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES2:%x", aob->res2);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES3:%x", aob->res3);
	DBF_DEV_EVENT(DBF_INFO, irq, "AORC:%u", aob->aorc);
	DBF_DEV_EVENT(DBF_INFO, irq, "FLAGS:%u", aob->flags);
	DBF_DEV_EVENT(DBF_INFO, irq, "CBTBS:%u", aob->cbtbs);
	DBF_DEV_EVENT(DBF_INFO, irq, "SBC:%u", aob->sb_count);
	/* per storage-block entry: AOB view vs. the SBAL's view */
	for (tmp = 0; tmp < QDIO_MAX_ELEMENTS_PER_BUFFER; ++tmp) {
		DBF_DEV_EVENT(DBF_INFO, irq, "SBA%d:%lx", tmp,
			      (unsigned long) aob->sba[tmp]);
		DBF_DEV_EVENT(DBF_INFO, irq, "rSBA%d:%lx", tmp,
			      (unsigned long) q->sbal[i]->element[tmp].addr);
		DBF_DEV_EVENT(DBF_INFO, irq, "DC%d:%u", tmp, aob->dcount[tmp]);
		DBF_DEV_EVENT(DBF_INFO, irq, "rDC%d:%u", tmp,
			      q->sbal[i]->element[tmp].length);
	}
	DBF_DEV_EVENT(DBF_INFO, irq, "USER0:%lx", (unsigned long) aob->user0);
	for (tmp = 0; tmp < 2; ++tmp) {
		DBF_DEV_EVENT(DBF_INFO, irq, "RES4%d:%lx", tmp,
			      (unsigned long) aob->res4[tmp]);
	}
	DBF_DEV_EVENT(DBF_INFO, irq, "USER1:%lx", (unsigned long) aob->user1);
	DBF_DEV_EVENT(DBF_INFO, irq, "USER2:%lx", (unsigned long) aob->user2);
}
667
668static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
669{
670 unsigned char state = 0;
671 int j, b = start;
672
673 if (!contains_aobs(q))
674 return;
675
676 for (j = 0; j < count; ++j) {
677 get_buf_state(q, b, &state, 0);
678 if (state == SLSB_P_OUTPUT_PENDING) {
679 struct qaob *aob = q->u.out.aobs[b];
680 if (aob == NULL)
681 continue;
682
683 BUG_ON(q->u.out.sbal_state == NULL);
684 q->u.out.sbal_state[b].flags |=
685 QDIO_OUTBUF_STATE_FLAG_PENDING;
686 q->u.out.aobs[b] = NULL;
687 } else if (state == SLSB_P_OUTPUT_EMPTY) {
688 BUG_ON(q->u.out.sbal_state == NULL);
689 q->u.out.sbal_state[b].aob = NULL;
690 }
691 b = next_buf(b);
692 }
693}
694
/*
 * Return the physical address of the AOB to attach to @bufnr, allocating
 * one on first use.  Returns 0 when the queue does not use a completion
 * queue or the allocation failed (the buffer then completes synchronously).
 */
static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
					int bufnr)
{
	unsigned long phys_aob = 0;

	if (!q->use_cq)
		goto out;

	/* lazily allocate the AOB for this buffer slot */
	if (!q->aobs[bufnr]) {
		struct qaob *aob = qdio_allocate_aob();
		q->aobs[bufnr] = aob;
	}
	if (q->aobs[bufnr]) {
		BUG_ON(q->sbal_state == NULL);
		q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
		q->sbal_state[bufnr].aob = q->aobs[bufnr];
		q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
		phys_aob = virt_to_phys(q->aobs[bufnr]);
		/* the hardware requires 256-byte alignment */
		BUG_ON(phys_aob & 0xFF);
	}

out:
	return phys_aob;
}
719
/*
 * Invoke the upper-layer handler for all buffers between first_to_kick
 * and first_to_check, then advance first_to_kick and clear the pending
 * error.  Skipped while the device is not active.
 */
static void qdio_kick_handler(struct qdio_q *q)
{
	int start = q->first_to_kick;
	int end = q->first_to_check;
	int count;

	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	count = sub_buf(end, start);

	if (q->is_input_q) {
		qperf_inc(q, inbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
	} else {
		qperf_inc(q, outbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
			      start, count);
	}

	/* resolve asynchronous completions before notifying the driver */
	qdio_handle_aobs(q, start, count);

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
		   q->irq_ptr->int_parm);

	/* for the next time */
	q->first_to_kick = end;
	q->qdio_error = 0;
}
749
/*
 * Core of the inbound tasklet: hand newly arrived buffers to the
 * handler, then either reschedule the tasklet (poll period not over)
 * or stop polling.  After stopping polling the queue is checked once
 * more so no incoming buffer is missed while the ACK state was reset.
 */
static void __qdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		/* means poll time is not yet over */
		qperf_inc(q, tasklet_inbound_resched);
		/* never reschedule once the device is being stopped */
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
			tasklet_schedule(&q->tasklet);
			return;
		}
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}
779
/* inbound tasklet entry point; data carries the queue pointer */
void qdio_inbound_processing(unsigned long data)
{
	__qdio_inbound_processing((struct qdio_q *)data);
}
785
/*
 * Scan the SLSB of the outbound queue and advance first_to_check over
 * all buffers the adapter has finished with.  Returns the (possibly
 * updated) first_to_check index.
 */
static int get_outbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state = 0;

	q->timestamp = get_clock();

	/*
	 * Issue a SIGA-sync first when required: for non-HiperSockets
	 * queues without outbound PCI support, and for HiperSockets
	 * multicast queues.
	 */
	if (need_siga_sync(q))
		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
		    !pci_out_supported(q)) ||
		    (queue_type(q) == QDIO_IQDIO_QFMT &&
		    multicast_outbound(q)))
			qdio_siga_sync_q(q);

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);
	if (q->first_to_check == stop)
		goto out;

	count = get_buf_states(q, q->first_to_check, &state, count, 0, 1);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_OUTPUT_PENDING:
		/* PENDING must have been consumed before we get here */
		BUG();
	case SLSB_P_OUTPUT_EMPTY:
		/* the adapter got it */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
			"out empty:%1d %02x", q->nr, count);

		atomic_sub(count, &q->nr_buf_used);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);

		break;
	case SLSB_P_OUTPUT_ERROR:
		/* report the error, then consume the buffers anyway */
		process_buffer_error(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
			      q->nr);
		break;
	case SLSB_P_OUTPUT_NOT_INIT:
	case SLSB_P_OUTPUT_HALTED:
		break;
	default:
		BUG();
	}

out:
	return q->first_to_check;
}
851
852/* all buffers processed? */
853static inline int qdio_outbound_q_done(struct qdio_q *q)
854{
855 return atomic_read(&q->nr_buf_used) == 0;
856}
857
858static inline int qdio_outbound_q_moved(struct qdio_q *q)
859{
860 int bufnr;
861
862 bufnr = get_outbound_buffer_frontier(q);
863
Jan Glauber1549d132012-05-09 16:27:34 +0200864 if (bufnr != q->last_move) {
Jan Glaubere85dea02009-03-26 15:24:29 +0100865 q->last_move = bufnr;
Jan Glauber22f99342008-12-25 13:38:46 +0100866 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
Jan Glauber779e6e12008-07-17 17:16:48 +0200867 return 1;
868 } else
869 return 0;
870}
871
/*
 * Signal the adapter (SIGA-write) that output buffers are ready.
 * @aob is the physical AOB address to pass along, or 0.
 *
 * Returns 0 on success, -EBUSY when the busy bit persisted through all
 * retries, -ENOBUFS on cc2 without busy bit, -EIO on cc1/cc3.
 */
static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
{
	int retries = 0, cc;
	unsigned int busy_bit;

	if (!need_siga_out(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
retry:
	qperf_inc(q, siga_write);

	cc = qdio_siga_output(q, &busy_bit, aob);
	switch (cc) {
	case 0:
		break;
	case 2:
		if (busy_bit) {
			/*
			 * Busy bit set: delay and reissue the SIGA up to
			 * QDIO_BUSY_BIT_RETRIES times (the "while" jumps
			 * back via the retry label on every iteration).
			 */
			while (++retries < QDIO_BUSY_BIT_RETRIES) {
				mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
				goto retry;
			}
			DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
			cc = -EBUSY;
		} else {
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
			cc = -ENOBUFS;
		}
		break;
	case 1:
	case 3:
		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
		cc = -EIO;
		break;
	}
	if (retries) {
		/* record that the busy condition occurred at all */
		DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
		DBF_ERROR("count:%u", retries);
	}
	return cc;
}
913
/*
 * Core of the outbound tasklet: report finished buffers to the handler,
 * then decide how progress will be noticed in the future — either by
 * rescheduling the tasklet (zfcp without PCI) or by (re)arming the
 * 10-second fallback timer.
 */
static void __qdio_outbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_outbound);
	BUG_ON(atomic_read(&q->nr_buf_used) < 0);

	if (qdio_outbound_q_moved(q))
		qdio_kick_handler(q);

	/* zfcp without outbound PCI: poll via the tasklet until done */
	if (queue_type(q) == QDIO_ZFCP_QFMT)
		if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
			goto sched;

	/* with PCI interrupts enabled the adapter notifies us itself */
	if (q->u.out.pci_out_enabled)
		return;

	/*
	 * Now we know that queue type is either qeth without pci enabled
	 * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY
	 * is noticed and outbound_handler is called after some time.
	 */
	if (qdio_outbound_q_done(q))
		del_timer(&q->u.out.timer);
	else
		if (!timer_pending(&q->u.out.timer))
			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
	return;

sched:
	/* never reschedule once the device is being stopped */
	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}
946
/* outbound tasklet entry point; data carries the queue pointer */
void qdio_outbound_processing(unsigned long data)
{
	__qdio_outbound_processing((struct qdio_q *)data);
}
953
954void qdio_outbound_timer(unsigned long data)
955{
956 struct qdio_q *q = (struct qdio_q *)data;
Jan Glauberc38f9602009-03-26 15:24:26 +0100957
958 if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
959 return;
Jan Glauber779e6e12008-07-17 17:16:48 +0200960 tasklet_schedule(&q->tasklet);
961}
962
Jan Glauber60b5df22009-06-22 12:08:10 +0200963static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
Jan Glauber779e6e12008-07-17 17:16:48 +0200964{
965 struct qdio_q *out;
966 int i;
967
968 if (!pci_out_supported(q))
969 return;
970
971 for_each_output_queue(q->irq_ptr, out, i)
972 if (!qdio_outbound_q_done(out))
973 tasklet_schedule(&out->tasklet);
974}
975
/*
 * Thin-interrupt variant of the inbound tasklet: optionally sync the
 * queues, kick PCI-capable outbound queues, then process inbound
 * buffers with the same stop-polling/re-check dance as
 * __qdio_inbound_processing to avoid losing initiative.
 */
static void __tiqdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);
	if (need_siga_sync(q) && need_siga_sync_after_ai(q))
		qdio_sync_queues(q);

	/*
	 * The interrupt could be caused by a PCI request. Check the
	 * PCI capable outbound queues.
	 */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		/* poll time not over yet — reschedule unless stopping */
		qperf_inc(q, tasklet_inbound_resched);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
			tasklet_schedule(&q->tasklet);
			return;
		}
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}
1012
/* thin-interrupt inbound tasklet entry point */
void tiqdio_inbound_processing(unsigned long data)
{
	__tiqdio_inbound_processing((struct qdio_q *)data);
}
1018
/* set the device state and make it visible before continuing */
static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

	irq_ptr->state = state;
	/* full barrier: other cpus must see the new state immediately */
	mb();
}
1027
Jan Glauber22f99342008-12-25 13:38:46 +01001028static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
Jan Glauber779e6e12008-07-17 17:16:48 +02001029{
Jan Glauber779e6e12008-07-17 17:16:48 +02001030 if (irb->esw.esw0.erw.cons) {
Jan Glauber22f99342008-12-25 13:38:46 +01001031 DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
1032 DBF_ERROR_HEX(irb, 64);
1033 DBF_ERROR_HEX(irb->ecw, 64);
Jan Glauber779e6e12008-07-17 17:16:48 +02001034 }
1035}
1036
/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

	if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;

	/*
	 * Input queues: either notify a polling upper layer via its
	 * queue_start_poll callback or fall back to the tasklet.
	 */
	for_each_input_queue(irq_ptr, q, i) {
		if (q->u.in.queue_start_poll) {
			/* skip if polling is enabled or already in work */
			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
				     &q->u.in.queue_irq_state)) {
				qperf_inc(q, int_discarded);
				continue;
			}
			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
						 q->irq_ptr->int_parm);
		} else {
			tasklet_schedule(&q->tasklet);
		}
	}

	/*
	 * NOTE(review): q still points at the last input queue here;
	 * this assumes at least one input queue exists and that
	 * pci_out_supported() only depends on per-irq state — verify.
	 */
	if (!pci_out_supported(q))
		return;

	for_each_output_queue(irq_ptr, q, i) {
		if (qdio_outbound_q_done(q))
			continue;
		if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
			qdio_siga_sync_q(q);
		tasklet_schedule(&q->tasklet);
	}
}
1072
/*
 * Handle a channel/device check while the subchannel is active: log the
 * condition, report QDIO_ERROR_ACTIVATE to the first available queue's
 * handler and stop the device.
 */
static void qdio_handle_activate_check(struct ccw_device *cdev,
				unsigned long intparm, int cstat, int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int count;

	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
	DBF_ERROR("intp :%lx", intparm);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	/* pick any queue to reach the handler; prefer an input queue */
	if (irq_ptr->nr_input_qs) {
		q = irq_ptr->input_qs[0];
	} else if (irq_ptr->nr_output_qs) {
		q = irq_ptr->output_qs[0];
	} else {
		dump_stack();
		goto no_handler;
	}

	count = sub_buf(q->first_to_check, q->first_to_kick);
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE,
		   q->nr, q->first_to_kick, count, irq_ptr->int_parm);
no_handler:
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
	/*
	 * In case of z/VM LGR (Live Guest Migration) QDIO recovery will happen.
	 * Therefore we call the LGR detection function here.
	 */
	lgr_info_log();
}
1104
/*
 * Evaluate the interrupt that answers the establish ccw: only a clean
 * device end (no channel status, no unexpected device status bits)
 * moves the device to ESTABLISHED; anything else ends in ERR.
 */
static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
				      int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");

	if (cstat)
		goto error;
	if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
		goto error;
	if (!(dstat & DEV_STAT_DEV_END))
		goto error;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
	return;

error:
	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
}
1126
/*
 * qdio interrupt handler — dispatches on the current device state:
 * INACTIVE handles the establish reply, CLEANUP completes shutdown,
 * ESTABLISHED/ACTIVE services PCI interrupts or activate checks.
 * Waiters on cdev->private->wait_q are woken for state transitions.
 */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int cstat, dstat;

	if (!intparm || !irq_ptr) {
		DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
		return;
	}

	if (irq_ptr->perf_stat_enabled)
		irq_ptr->perf_stat.qdio_int++;

	/* cio may pass an error pointer instead of a real irb */
	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
			qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
			wake_up(&cdev->private->wait_q);
			return;
		default:
			WARN_ON(1);
			return;
		}
	}
	qdio_irq_check_sense(irq_ptr, irb);
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		qdio_establish_handle_irq(cdev, cstat, dstat);
		break;
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_int_handler_pci(irq_ptr);
			/* no state change — do not wake waiters */
			return;
		}
		if (cstat || dstat)
			qdio_handle_activate_check(cdev, intparm, cstat,
						   dstat);
		break;
	case QDIO_IRQ_STATE_STOPPED:
		break;
	default:
		WARN_ON(1);
	}
	wake_up(&cdev->private->wait_q);
}
1182
/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code (-EINVAL for an invalid @cdev). The
 * results of the chsc are stored in the specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
		       struct qdio_ssqd_desc *data)
{

	if (!cdev || !cdev->private)
		return -EINVAL;

	DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
	return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
1202
/*
 * Stop all queue processing before shutdown: kill every queue tasklet
 * and cancel the outbound fallback timers.
 */
static void qdio_shutdown_queues(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		tasklet_kill(&q->tasklet);

	for_each_output_queue(irq_ptr, q, i) {
		del_timer(&q->u.out.timer);
		tasklet_kill(&q->tasklet);
	}
}
1217
/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 *
 * Stops all queue processing, shuts down the subchannel via halt or
 * clear, tears down the thin-interrupt setup and restores the original
 * interrupt handler. Returns 0 on success or a negative error code.
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int rc;
	unsigned long flags;

	if (!irq_ptr)
		return -ENODEV;

	BUG_ON(irqs_disabled());
	DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);

	mutex_lock(&irq_ptr->setup_mutex);
	/*
	 * Subchannel was already shot down. We cannot prevent being called
	 * twice since cio may trigger a shutdown asynchronously.
	 */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		mutex_unlock(&irq_ptr->setup_mutex);
		return 0;
	}

	/*
	 * Indicate that the device is going down. Scheduling the queue
	 * tasklets is forbidden from here on.
	 */
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);

	tiqdio_remove_input_queues(irq_ptr);
	qdio_shutdown_queues(cdev);
	qdio_shutdown_debug_entries(irq_ptr, cdev);

	/* cleanup subchannel */
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
	else
		/* default behaviour is halt */
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
	if (rc) {
		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4d", rc);
		goto no_cleanup;
	}

	/* wait (with the cdev lock dropped) for the interrupt to arrive */
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR,
		10 * HZ);
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

no_cleanup:
	qdio_shutdown_thinint(irq_ptr);

	/* restore interrupt handler */
	if ((void *)cdev->handler == (void *)qdio_int_handler)
		cdev->handler = irq_ptr->orig_handler;
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	if (rc)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);
1292
/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 *
 * Unregisters the debug area, detaches the qdio data from @cdev and
 * releases all qdio memory. Returns 0 or -ENODEV if no qdio data is
 * attached to the device.
 */
int qdio_free(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;

	DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
	mutex_lock(&irq_ptr->setup_mutex);

	if (irq_ptr->debug_area != NULL) {
		debug_unregister(irq_ptr->debug_area);
		irq_ptr->debug_area = NULL;
	}
	cdev->private->qdio_data = NULL;
	mutex_unlock(&irq_ptr->setup_mutex);

	/* free memory only after the mutex was dropped — it lives in irq_ptr */
	qdio_release_memory(irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);
1318
/**
 * qdio_allocate - allocate qdio queues and associated data
 * @init_data: initialization data
 *
 * Validates @init_data, then allocates the irq structure, the debug
 * area, the chsc page, the qdr and the queues. Returns 0, -EINVAL on
 * invalid initialization data or -ENOMEM.
 */
int qdio_allocate(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;

	DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);

	/* each requested queue direction needs its handler ... */
	if ((init_data->no_input_qs && !init_data->input_handler) ||
	    (init_data->no_output_qs && !init_data->output_handler))
		return -EINVAL;

	/* ... a sane queue count ... */
	if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
	    (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
		return -EINVAL;

	/* ... and the caller-provided sbal arrays */
	if ((!init_data->input_sbal_addr_array) ||
	    (!init_data->output_sbal_addr_array))
		return -EINVAL;

	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr)
		goto out_err;

	mutex_init(&irq_ptr->setup_mutex);
	qdio_allocate_dbf(init_data, irq_ptr);

	/*
	 * Allocate a page for the chsc calls in qdio_establish.
	 * Must be pre-allocated since a zfcp recovery will call
	 * qdio_establish. In case of low memory and swap on a zfcp disk
	 * we may not be able to allocate memory otherwise.
	 */
	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr->chsc_page)
		goto out_rel;

	/* qdr is used in ccw1.cda which is u32 */
	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->qdr)
		goto out_rel;
	WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);

	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
			     init_data->no_output_qs))
		goto out_rel;

	init_data->cdev->private->qdio_data = irq_ptr;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	return 0;
out_rel:
	/* releases everything allocated so far, including irq_ptr itself */
	qdio_release_memory(irq_ptr);
out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(qdio_allocate);
1378
frank.blaschka@de.ibm.com104ea552011-08-08 01:33:55 +00001379static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
1380{
1381 struct qdio_q *q = irq_ptr->input_qs[0];
1382 int i, use_cq = 0;
1383
1384 if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
1385 use_cq = 1;
1386
1387 for_each_output_queue(irq_ptr, q, i) {
1388 if (use_cq) {
1389 if (qdio_enable_async_operation(&q->u.out) < 0) {
1390 use_cq = 0;
1391 continue;
1392 }
1393 } else
1394 qdio_disable_async_operation(&q->u.out);
1395 }
1396 DBF_EVENT("use_cq:%d", use_cq);
1397}
1398
/**
 * qdio_establish - establish queues on a qdio subchannel
 * @init_data: initialization data
 *
 * Sets up the irq and thin-interrupt handling, starts the establish
 * ccw program and waits for its completion, then finishes the setup
 * (ssqd info, completion queue detection, buffer states). On any
 * failure the subchannel is shut down again with clear.
 * Returns 0 or a negative error code.
 */
int qdio_establish(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;
	struct ccw_device *cdev = init_data->cdev;
	unsigned long saveflags;
	int rc;

	DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_setup_irq(init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	/* establish q */
	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->equeue.count;
	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	if (rc) {
		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	/* the establish interrupt moves the state to ESTABLISHED or ERR */
	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}

	qdio_setup_ssqd_info(irq_ptr);

	qdio_detect_hsicq(irq_ptr);

	/* qebsm is now setup if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr, cdev);
	qdio_setup_debug_entries(irq_ptr, cdev);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);
1474
/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 *
 * Starts the activate ccw program and, for thin-interrupt devices,
 * registers the input queues. Returns 0 on success, -ENODEV/-EINVAL
 * for a missing or offline device, -EBUSY if the device is inactive,
 * or -EIO if activation failed.
 */
int qdio_activate(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr;
	int rc;
	unsigned long saveflags;

	DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->aqueue.count;
	irq_ptr->ccw.cda = 0;

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	if (rc) {
		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc)
		goto out;

	if (is_thinint_irq(irq_ptr))
		tiqdio_add_input_queues(irq_ptr);

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		rc = -EIO;
		break;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);
1539
1540static inline int buf_in_between(int bufnr, int start, int count)
1541{
1542 int end = add_buf(start, count);
1543
1544 if (end > start) {
1545 if (bufnr >= start && bufnr < end)
1546 return 1;
1547 else
1548 return 0;
1549 }
1550
1551 /* wrap-around case */
1552 if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
1553 (bufnr < end))
1554 return 1;
1555 else
1556 return 0;
1557}
1558
/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 *
 * Returns 0 on success, or the result of qdio_siga_input() if the
 * adapter has to be signaled about the newly emptied buffers.
 */
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
			  int bufnr, int count)
{
	int used, diff;

	qperf_inc(q, inbound_call);

	if (!q->u.in.polling)
		goto set;

	/* protect against stop polling setting an ACK for an emptied slsb */
	if (count == QDIO_MAX_BUFFERS_PER_Q) {
		/* overwriting everything, just delete polling status */
		q->u.in.polling = 0;
		q->u.in.ack_count = 0;
		goto set;
	} else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
		if (is_qebsm(q)) {
			/* partial overwrite, just update ack_start */
			diff = add_buf(bufnr, count);
			diff = sub_buf(diff, q->u.in.ack_start);
			q->u.in.ack_count -= diff;
			if (q->u.in.ack_count <= 0) {
				/* no ACKed buffer left, stop polling */
				q->u.in.polling = 0;
				q->u.in.ack_count = 0;
				goto set;
			}
			q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
		}
		else
			/* the only ACK will be deleted, so stop polling */
			q->u.in.polling = 0;
	}

set:
	/* hand the buffers back to the adapter as empty */
	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);

	/* used counts the buffers occupied before this call */
	used = atomic_add_return(count, &q->nr_buf_used) - count;
	BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);

	if (need_siga_in(q))
		return qdio_siga_input(q);

	return 0;
}
1611
/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 *
 * Returns 0 on success, or a nonzero SIGA error code from kicking or
 * syncing the queue.
 */
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
			   int bufnr, int count)
{
	unsigned char state = 0;
	int used, rc = 0;

	qperf_inc(q, outbound_call);

	/* mark the buffers as primed so the adapter may transmit them */
	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
	used = atomic_add_return(count, &q->nr_buf_used);
	BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);

	if (used == QDIO_MAX_BUFFERS_PER_Q)
		qperf_inc(q, outbound_queue_full);

	if (callflags & QDIO_FLAG_PCI_OUT) {
		q->u.out.pci_out_enabled = 1;
		qperf_inc(q, pci_request_int);
	} else
		q->u.out.pci_out_enabled = 0;

	if (queue_type(q) == QDIO_IQDIO_QFMT) {
		unsigned long phys_aob = 0;

		/* One SIGA-W per buffer required for unicast HSI */
		WARN_ON_ONCE(count > 1 && !multicast_outbound(q));

		phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);

		rc = qdio_kick_outbound_q(q, phys_aob);
	} else if (need_siga_sync(q)) {
		rc = qdio_siga_sync_q(q);
	} else {
		/* try to fast requeue buffers */
		get_buf_state(q, prev_buf(bufnr), &state, 0);
		if (state != SLSB_CU_OUTPUT_PRIMED)
			rc = qdio_kick_outbound_q(q, 0);
		else
			/* previous buffer still primed: adapter scans on */
			qperf_inc(q, fast_requeue);
	}

	/* in case of SIGA errors we must process the error immediately */
	if (used >= q->u.out.scan_threshold || rc)
		tasklet_schedule(&q->tasklet);
	else
		/* free the SBALs in case of no further traffic */
		if (!timer_pending(&q->u.out.timer))
			mod_timer(&q->u.out.timer, jiffies + HZ);
	return rc;
}
1669
1670/**
1671 * do_QDIO - process input or output buffers
1672 * @cdev: associated ccw_device for the qdio subchannel
1673 * @callflags: input or output and special flags from the program
1674 * @q_nr: queue number
1675 * @bufnr: buffer number
1676 * @count: how many buffers to process
1677 */
1678int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
Jan Glauber66182412009-06-22 12:08:15 +02001679 int q_nr, unsigned int bufnr, unsigned int count)
Jan Glauber779e6e12008-07-17 17:16:48 +02001680{
1681 struct qdio_irq *irq_ptr;
Jan Glauber779e6e12008-07-17 17:16:48 +02001682
frank.blaschka@de.ibm.com104ea552011-08-08 01:33:55 +00001683
Jan Glauber66182412009-06-22 12:08:15 +02001684 if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
Jan Glauber779e6e12008-07-17 17:16:48 +02001685 return -EINVAL;
1686
Jan Glauber779e6e12008-07-17 17:16:48 +02001687 irq_ptr = cdev->private->qdio_data;
1688 if (!irq_ptr)
1689 return -ENODEV;
1690
Jan Glauber1d7e1502009-09-22 22:58:39 +02001691 DBF_DEV_EVENT(DBF_INFO, irq_ptr,
1692 "do%02x b:%02x c:%02x", callflags, bufnr, count);
Jan Glauber779e6e12008-07-17 17:16:48 +02001693
1694 if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
Jan Glauber1549d132012-05-09 16:27:34 +02001695 return -EIO;
Jan Glauber9a265132011-03-23 10:16:01 +01001696 if (!count)
1697 return 0;
Jan Glauber779e6e12008-07-17 17:16:48 +02001698 if (callflags & QDIO_FLAG_SYNC_INPUT)
Jan Glauberd303b6f2009-03-26 15:24:31 +01001699 return handle_inbound(irq_ptr->input_qs[q_nr],
1700 callflags, bufnr, count);
Jan Glauber779e6e12008-07-17 17:16:48 +02001701 else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
Jan Glauberd303b6f2009-03-26 15:24:31 +01001702 return handle_outbound(irq_ptr->output_qs[q_nr],
1703 callflags, bufnr, count);
1704 return -EINVAL;
Jan Glauber779e6e12008-07-17 17:16:48 +02001705}
1706EXPORT_SYMBOL_GPL(do_QDIO);
1707
Jan Glauberd36deae2010-09-07 21:14:39 +00001708/**
1709 * qdio_start_irq - process input buffers
1710 * @cdev: associated ccw_device for the qdio subchannel
1711 * @nr: input queue number
1712 *
1713 * Return codes
1714 * 0 - success
1715 * 1 - irqs not started since new data is available
1716 */
1717int qdio_start_irq(struct ccw_device *cdev, int nr)
1718{
1719 struct qdio_q *q;
1720 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1721
1722 if (!irq_ptr)
1723 return -ENODEV;
1724 q = irq_ptr->input_qs[nr];
1725
1726 WARN_ON(queue_irqs_enabled(q));
1727
Jan Glauber5f4026f2011-10-30 15:17:20 +01001728 clear_nonshared_ind(irq_ptr);
Jan Glauberd36deae2010-09-07 21:14:39 +00001729 qdio_stop_polling(q);
1730 clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
1731
1732 /*
1733 * We need to check again to not lose initiative after
1734 * resetting the ACK state.
1735 */
Jan Glauber5f4026f2011-10-30 15:17:20 +01001736 if (test_nonshared_ind(irq_ptr))
Jan Glauberd36deae2010-09-07 21:14:39 +00001737 goto rescan;
1738 if (!qdio_inbound_q_done(q))
1739 goto rescan;
1740 return 0;
1741
1742rescan:
1743 if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
1744 &q->u.in.queue_irq_state))
1745 return 0;
1746 else
1747 return 1;
1748
1749}
1750EXPORT_SYMBOL(qdio_start_irq);
1751
1752/**
1753 * qdio_get_next_buffers - process input buffers
1754 * @cdev: associated ccw_device for the qdio subchannel
1755 * @nr: input queue number
1756 * @bufnr: first filled buffer number
1757 * @error: buffers are in error state
1758 *
1759 * Return codes
1760 * < 0 - error
1761 * = 0 - no new buffers found
1762 * > 0 - number of processed buffers
1763 */
1764int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
1765 int *error)
1766{
1767 struct qdio_q *q;
1768 int start, end;
1769 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1770
1771 if (!irq_ptr)
1772 return -ENODEV;
1773 q = irq_ptr->input_qs[nr];
1774 WARN_ON(queue_irqs_enabled(q));
1775
Jan Glauberd36deae2010-09-07 21:14:39 +00001776 /*
Jan Glauber90adac52011-01-05 12:47:54 +01001777 * Cannot rely on automatic sync after interrupt since queues may
1778 * also be examined without interrupt.
Jan Glauberd36deae2010-09-07 21:14:39 +00001779 */
Jan Glauber90adac52011-01-05 12:47:54 +01001780 if (need_siga_sync(q))
1781 qdio_sync_queues(q);
1782
1783 /* check the PCI capable outbound queues. */
Jan Glauberd36deae2010-09-07 21:14:39 +00001784 qdio_check_outbound_after_thinint(q);
1785
1786 if (!qdio_inbound_q_moved(q))
1787 return 0;
1788
1789 /* Note: upper-layer MUST stop processing immediately here ... */
1790 if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
1791 return -EIO;
1792
1793 start = q->first_to_kick;
1794 end = q->first_to_check;
1795 *bufnr = start;
1796 *error = q->qdio_error;
1797
1798 /* for the next time */
1799 q->first_to_kick = end;
1800 q->qdio_error = 0;
1801 return sub_buf(end, start);
1802}
1803EXPORT_SYMBOL(qdio_get_next_buffers);
1804
1805/**
1806 * qdio_stop_irq - disable interrupt processing for the device
1807 * @cdev: associated ccw_device for the qdio subchannel
1808 * @nr: input queue number
1809 *
1810 * Return codes
1811 * 0 - interrupts were already disabled
1812 * 1 - interrupts successfully disabled
1813 */
1814int qdio_stop_irq(struct ccw_device *cdev, int nr)
1815{
1816 struct qdio_q *q;
1817 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1818
1819 if (!irq_ptr)
1820 return -ENODEV;
1821 q = irq_ptr->input_qs[nr];
1822
1823 if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
1824 &q->u.in.queue_irq_state))
1825 return 0;
1826 else
1827 return 1;
1828}
1829EXPORT_SYMBOL(qdio_stop_irq);
1830
Jan Glauber779e6e12008-07-17 17:16:48 +02001831static int __init init_QDIO(void)
1832{
1833 int rc;
1834
Sebastian Ottaa5c8df2011-04-04 09:43:31 +02001835 rc = qdio_debug_init();
Jan Glauber779e6e12008-07-17 17:16:48 +02001836 if (rc)
1837 return rc;
Sebastian Ottaa5c8df2011-04-04 09:43:31 +02001838 rc = qdio_setup_init();
1839 if (rc)
1840 goto out_debug;
Jan Glauber779e6e12008-07-17 17:16:48 +02001841 rc = tiqdio_allocate_memory();
1842 if (rc)
1843 goto out_cache;
Jan Glauber779e6e12008-07-17 17:16:48 +02001844 rc = tiqdio_register_thinints();
1845 if (rc)
Sebastian Ottaa5c8df2011-04-04 09:43:31 +02001846 goto out_ti;
Jan Glauber779e6e12008-07-17 17:16:48 +02001847 return 0;
1848
Jan Glauber779e6e12008-07-17 17:16:48 +02001849out_ti:
1850 tiqdio_free_memory();
1851out_cache:
1852 qdio_setup_exit();
Sebastian Ottaa5c8df2011-04-04 09:43:31 +02001853out_debug:
1854 qdio_debug_exit();
Jan Glauber779e6e12008-07-17 17:16:48 +02001855 return rc;
1856}
1857
/* Module teardown: release resources in reverse order of init_QDIO(). */
static void __exit exit_QDIO(void)
{
	tiqdio_unregister_thinints();
	tiqdio_free_memory();
	qdio_setup_exit();
	qdio_debug_exit();
}
1865
1866module_init(init_QDIO);
1867module_exit(exit_QDIO);