blob: a7153f2f3aff1ad8c6c768584fcb33feb133ec59 [file] [log] [blame]
Jan Glauber779e6e12008-07-17 17:16:48 +02001/*
2 * linux/drivers/s390/cio/qdio_main.c
3 *
4 * Linux for s390 qdio support, buffer handling, qdio API and module support.
5 *
6 * Copyright 2000,2008 IBM Corp.
7 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
8 * Jan Glauber <jang@linux.vnet.ibm.com>
9 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
10 */
11#include <linux/module.h>
12#include <linux/init.h>
13#include <linux/kernel.h>
14#include <linux/timer.h>
15#include <linux/delay.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090016#include <linux/gfp.h>
frank.blaschka@de.ibm.com104ea552011-08-08 01:33:55 +000017#include <linux/io.h>
Jan Glauber30d77c32011-01-05 12:47:29 +010018#include <linux/kernel_stat.h>
Arun Sharma60063492011-07-26 16:09:06 -070019#include <linux/atomic.h>
Jan Glauber779e6e12008-07-17 17:16:48 +020020#include <asm/debug.h>
21#include <asm/qdio.h>
22
23#include "cio.h"
24#include "css.h"
25#include "device.h"
26#include "qdio.h"
27#include "qdio_debug.h"
Jan Glauber779e6e12008-07-17 17:16:48 +020028
/* module identification for modinfo / license enforcement */
MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
	"Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");
33
/**
 * do_siga_sync - issue the SIGA-sync instruction
 * @schid: subchannel id, or the subchannel token when QEBSM is in use
 * @out_mask: which output queues to sync
 * @in_mask: which input queues to sync
 * @fc: function code, may contain the QEBSM flag
 *
 * Returns the condition code of the instruction (0 on success).
 */
static inline int do_siga_sync(unsigned long schid,
			       unsigned int out_mask, unsigned int in_mask,
			       unsigned int fc)
{
	/* SIGA expects its operands in fixed registers 0-3 */
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long out asm ("2") = out_mask;
	register unsigned long in asm ("3") = in_mask;
	int cc;

	/* ipm/srl extract the condition code from the PSW into %0 */
	asm volatile(
		" siga 0\n"
		" ipm %0\n"
		" srl %0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
	return cc;
}
52
/**
 * do_siga_input - issue the SIGA-read instruction
 * @schid: subchannel id, or the subchannel token when QEBSM is in use
 * @mask: which input queues to process
 * @fc: function code, may contain the QEBSM flag
 *
 * Returns the condition code of the instruction (0 on success).
 */
static inline int do_siga_input(unsigned long schid, unsigned int mask,
				unsigned int fc)
{
	/* SIGA expects its operands in fixed registers 0-2 */
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long __mask asm ("2") = mask;
	int cc;

	/* "memory" clobber: the adapter may have updated queue memory */
	asm volatile(
		" siga 0\n"
		" ipm %0\n"
		" srl %0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
	return cc;
}
69
/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 * @aob: asynchronous operation block address, passed to the adapter in
 *	 register 3 (0 when no AOB is used)
 *
 * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
 * Note: For IQDC unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 unsigned int *bb, unsigned int fc,
				 unsigned long aob)
{
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	register unsigned long __aob asm("3") = aob;
	/*
	 * Preset cc with the access-exception value: if siga faults, the
	 * EX_TABLE fixup jumps past ipm/srl and this value is returned.
	 */
	int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;

	asm volatile(
		" siga 0\n"
		"0: ipm %0\n"
		" srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask),
		  "+d" (__aob)
		: : "cc", "memory");
	/* the busy bit is returned in the leftmost bit of register 0 */
	*bb = ((unsigned int) __fc) >> 31;
	return cc;
}
102
103static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
104{
Jan Glauber779e6e12008-07-17 17:16:48 +0200105 /* all done or next buffer state different */
106 if (ccq == 0 || ccq == 32)
107 return 0;
108 /* not all buffers processed */
109 if (ccq == 96 || ccq == 97)
110 return 1;
111 /* notify devices immediately */
Jan Glauber22f99342008-12-25 13:38:46 +0100112 DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
Jan Glauber779e6e12008-07-17 17:16:48 +0200113 return -EIO;
114}
115
/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffers state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	BUG_ON(!q->irq_ptr->sch_token);
	qperf_inc(q, eqbs);

	/* output queues are numbered after all input queues for QEBSM */
	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
		      auto_ack);
	rc = qdio_check_ccq(q, ccq);

	/* At least one buffer was processed, return and extract the remaining
	 * buffers later.
	 */
	if ((ccq == 96) && (count != tmp_count)) {
		qperf_inc(q, eqbs_partial);
		return (count - tmp_count);
	}

	if (rc == 1) {
		/* the instruction asked to be reissued */
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
		goto again;
	}

	if (rc < 0) {
		/* unrecoverable: report a check condition to the upper layer */
		DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}
	return count - tmp_count;
}
168
/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Does retrying until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	if (!count)
		return 0;

	BUG_ON(!q->irq_ptr->sch_token);
	qperf_inc(q, sqbs);

	/* output queues are numbered after all input queues for QEBSM */
	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
	rc = qdio_check_ccq(q, ccq);
	if (rc == 1) {
		/* partial completion: retry until everything is set */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
		qperf_inc(q, sqbs_partial);
		goto again;
	}
	if (rc < 0) {
		/* unrecoverable: report a check condition to the upper layer */
		DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}
	WARN_ON(tmp_count);
	return count - tmp_count;
}
215
/* returns number of examined buffers and their common state in *state */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
				 unsigned char *state, unsigned int count,
				 int auto_ack, int merge_pending)
{
	unsigned char __state = 0;
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

	for (i = 0; i < count; i++) {
		if (!__state) {
			__state = q->slsb.val[bufnr];
			/* with merge_pending, PENDING is reported as EMPTY */
			if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
				__state = SLSB_P_OUTPUT_EMPTY;
		} else if (merge_pending) {
			/* bitwise subset test so PENDING can merge with EMPTY */
			if ((q->slsb.val[bufnr] & __state) != __state)
				break;
		} else if (q->slsb.val[bufnr] != __state)
			break;
		bufnr = next_buf(bufnr);
	}
	*state = __state;
	return i;
}
245
/* convenience wrapper: examine exactly one SLSB entry, no PENDING merging */
static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
				unsigned char *state, int auto_ack)
{
	const unsigned int one_buffer = 1;

	return get_buf_states(q, bufnr, state, one_buffer, auto_ack, 0);
}
251
252/* wrap-around safe setting of slsb states, returns number of changed buffers */
253static inline int set_buf_states(struct qdio_q *q, int bufnr,
254 unsigned char state, int count)
255{
256 int i;
257
258 BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
259 BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
260
261 if (is_qebsm(q))
262 return qdio_do_sqbs(q, state, bufnr, count);
263
264 for (i = 0; i < count; i++) {
265 xchg(&q->slsb.val[bufnr], state);
266 bufnr = next_buf(bufnr);
267 }
268 return count;
269}
270
/* convenience wrapper: set the state of a single SLSB entry */
static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	const int one_buffer = 1;

	return set_buf_states(q, bufnr, state, one_buffer);
}
276
277/* set slsb states to initial state */
278void qdio_init_buf_states(struct qdio_irq *irq_ptr)
279{
280 struct qdio_q *q;
281 int i;
282
283 for_each_input_queue(irq_ptr, q, i)
284 set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
285 QDIO_MAX_BUFFERS_PER_Q);
286 for_each_output_queue(irq_ptr, q, i)
287 set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
288 QDIO_MAX_BUFFERS_PER_Q);
289}
290
/* sync the selected input/output queue masks with the adapter via SIGA-s */
static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
				 unsigned int input)
{
	/* first word of the schid is used as the subchannel identifier */
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_SYNC;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
	qperf_inc(q, siga_sync);

	if (is_qebsm(q)) {
		/* QEBSM addresses the subchannel by its token */
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_sync(schid, output, input, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
	return cc;
}
311
Jan Glauber60b5df22009-06-22 12:08:10 +0200312static inline int qdio_siga_sync_q(struct qdio_q *q)
Jan Glauber779e6e12008-07-17 17:16:48 +0200313{
314 if (q->is_input_q)
315 return qdio_siga_sync(q, 0, q->mask);
316 else
317 return qdio_siga_sync(q, q->mask, 0);
318}
319
/*
 * Issue SIGA-w (or SIGA-wq when an AOB is supplied), retrying while the
 * hipersocket busy bit is reported, for at most QDIO_BUSY_BIT_PATIENCE.
 */
static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
			    unsigned long aob)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_WRITE;
	u64 start_time = 0;
	int retries = 0, cc;
	unsigned long laob = 0;

	/* only pass the AOB along when the completion queue is in use */
	if (q->u.out.use_cq && aob != 0) {
		fc = QDIO_SIGA_WRITEQ;
		laob = aob;
	}

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}
again:
	/* AOBs are only valid for hipersocket queues and SIGA-wq */
	WARN_ON_ONCE((aob && queue_type(q) != QDIO_IQDIO_QFMT) ||
		(aob && fc != QDIO_SIGA_WRITEQ));
	cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);

	/* hipersocket busy condition */
	if (unlikely(*busy_bit)) {
		WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
		retries++;

		/* remember when we first saw the busy bit ... */
		if (!start_time) {
			start_time = get_clock();
			goto again;
		}
		/* ... and retry until the patience interval has expired */
		if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}
	if (retries) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
			      "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
	}
	return cc;
}
362
/* tell the adapter to process this queue's input buffers via SIGA-r */
static inline int qdio_siga_input(struct qdio_q *q)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_READ;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
	qperf_inc(q, siga_read);

	if (is_qebsm(q)) {
		/* QEBSM addresses the subchannel by its token */
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_input(schid, q->mask, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
	return cc;
}
382
/* sync all output queues / all queues of the subchannel in one SIGA-s */
#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)

static inline void qdio_sync_queues(struct qdio_q *q)
{
	/* PCI capable outbound queues will also be scanned so sync them too */
	if (pci_out_supported(q))
		qdio_siga_sync_all(q);
	else
		qdio_siga_sync_q(q);
}
394
/* debug helper: read one buffer state, syncing the queue first if required */
int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
			unsigned char *state)
{
	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	return get_buf_states(q, bufnr, state, 1, 0, 0);
}
402
/* leave polling mode and withdraw the outstanding input ACK(s) */
static inline void qdio_stop_polling(struct qdio_q *q)
{
	if (!q->u.in.polling)
		return;

	q->u.in.polling = 0;
	qperf_inc(q, stop_polling);

	/* show the card that we are not polling anymore */
	if (is_qebsm(q)) {
		/* QEBSM may hold an ACK across several buffers */
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = 0;
	} else
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
}
419
Jan Glauberd3072972010-02-26 22:37:36 +0100420static inline void account_sbals(struct qdio_q *q, int count)
421{
422 int pos = 0;
423
424 q->q_stats.nr_sbal_total += count;
425 if (count == QDIO_MAX_BUFFERS_MASK) {
426 q->q_stats.nr_sbals[7]++;
427 return;
428 }
429 while (count >>= 1)
430 pos++;
431 q->q_stats.nr_sbals[pos]++;
432}
433
/* log an SLSB error state and reset the affected buffers to NOT_INIT */
static void process_buffer_error(struct qdio_q *q, int count)
{
	unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
					SLSB_P_OUTPUT_NOT_INIT;

	q->qdio_error |= QDIO_ERROR_SLSB_STATE;

	/* special handling for no target buffer empty */
	if ((!q->is_input_q &&
	    (q->sbal[q->first_to_check]->element[15].sflags) == 0x10)) {
		/* not logged as an error: the target simply has no room */
		qperf_inc(q, target_full);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
			      q->first_to_check);
		return;
	}

	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
	DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
	DBF_ERROR("F14:%2x F15:%2x",
		  q->sbal[q->first_to_check]->element[14].sflags,
		  q->sbal[q->first_to_check]->element[15].sflags);

	/*
	 * Interrupts may be avoided as long as the error is present
	 * so change the buffer state immediately to avoid starvation.
	 */
	set_buf_states(q, q->first_to_check, state, count);
}
Jan Glauber779e6e12008-07-17 17:16:48 +0200463
/* acknowledge newly primed input buffers and retire any previous ACK */
static inline void inbound_primed(struct qdio_q *q, int count)
{
	int new;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count);

	/* for QEBSM the ACK was already set by EQBS */
	if (is_qebsm(q)) {
		if (!q->u.in.polling) {
			q->u.in.polling = 1;
			q->u.in.ack_count = count;
			q->u.in.ack_start = q->first_to_check;
			return;
		}

		/* delete the previous ACK's */
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = count;
		q->u.in.ack_start = q->first_to_check;
		return;
	}

	/*
	 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
	 * or by the next inbound run.
	 */
	new = add_buf(q->first_to_check, count - 1);
	if (q->u.in.polling) {
		/* reset the previous ACK but first set the new one */
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
	} else {
		q->u.in.polling = 1;
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
	}

	q->u.in.ack_start = new;
	count--;
	if (!count)
		return;
	/* need to change ALL buffers to get more interrupts */
	set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
}
508
/*
 * Scan the input queue's SLSB and advance first_to_check over buffers the
 * adapter has handed back; returns the new first_to_check.
 */
static int get_inbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state = 0;

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	if (q->first_to_check == stop)
		goto out;

	/*
	 * No siga sync here, as a PCI or we after a thin interrupt
	 * already sync'ed the queues.
	 */
	count = get_buf_states(q, q->first_to_check, &state, count, 1, 0);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		/* new data arrived: ACK it and claim the buffers */
		inbound_primed(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (atomic_sub(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		break;
	case SLSB_P_INPUT_ERROR:
		process_buffer_error(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_INPUT_EMPTY:
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		/* nothing new from the adapter */
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
		break;
	default:
		BUG();
	}
out:
	return q->first_to_check;
}
561
Jan Glauber60b5df22009-06-22 12:08:10 +0200562static int qdio_inbound_q_moved(struct qdio_q *q)
Jan Glauber779e6e12008-07-17 17:16:48 +0200563{
564 int bufnr;
565
566 bufnr = get_inbound_buffer_frontier(q);
567
Jan Glaubere85dea02009-03-26 15:24:29 +0100568 if ((bufnr != q->last_move) || q->qdio_error) {
569 q->last_move = bufnr;
Martin Schwidefsky27d71602010-02-26 22:37:38 +0100570 if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
Jan Glauber3a601bf2010-05-17 10:00:17 +0200571 q->u.in.timestamp = get_clock();
Jan Glauber779e6e12008-07-17 17:16:48 +0200572 return 1;
573 } else
574 return 0;
575}
576
/* returns 1 when the input queue needs no further polling, 0 otherwise */
static inline int qdio_inbound_q_done(struct qdio_q *q)
{
	unsigned char state = 0;

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	get_buf_state(q, q->first_to_check, &state, 0);

	if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
		/* more work coming */
		return 0;

	if (is_thinint_irq(q->irq_ptr))
		return 1;

	/* don't poll under z/VM */
	if (MACHINE_IS_VM)
		return 1;

	/*
	 * At this point we know, that inbound first_to_check
	 * has (probably) not moved (see qdio_inbound_processing).
	 */
	if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
		/* poll time is over, stop polling */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
			      q->first_to_check);
		return 1;
	} else
		return 0;
}
610
frank.blaschka@de.ibm.com104ea552011-08-08 01:33:55 +0000611static inline int contains_aobs(struct qdio_q *q)
612{
613 return !q->is_input_q && q->u.out.use_cq;
614}
615
/* dump the complete contents of an AOB (and the matching SBAL entries)
 * into the device's debug feature */
static inline void qdio_trace_aob(struct qdio_irq *irq, struct qdio_q *q,
				  int i, struct qaob *aob)
{
	int tmp;

	DBF_DEV_EVENT(DBF_INFO, irq, "AOB%d:%lx", i,
		      (unsigned long) virt_to_phys(aob));
	DBF_DEV_EVENT(DBF_INFO, irq, "RES00:%lx",
		      (unsigned long) aob->res0[0]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES01:%lx",
		      (unsigned long) aob->res0[1]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES02:%lx",
		      (unsigned long) aob->res0[2]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES03:%lx",
		      (unsigned long) aob->res0[3]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES04:%lx",
		      (unsigned long) aob->res0[4]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES05:%lx",
		      (unsigned long) aob->res0[5]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES1:%x", aob->res1);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES2:%x", aob->res2);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES3:%x", aob->res3);
	DBF_DEV_EVENT(DBF_INFO, irq, "AORC:%u", aob->aorc);
	DBF_DEV_EVENT(DBF_INFO, irq, "FLAGS:%u", aob->flags);
	DBF_DEV_EVENT(DBF_INFO, irq, "CBTBS:%u", aob->cbtbs);
	DBF_DEV_EVENT(DBF_INFO, irq, "SBC:%u", aob->sb_count);
	/* log the AOB's storage list next to the SBAL's for comparison */
	for (tmp = 0; tmp < QDIO_MAX_ELEMENTS_PER_BUFFER; ++tmp) {
		DBF_DEV_EVENT(DBF_INFO, irq, "SBA%d:%lx", tmp,
			      (unsigned long) aob->sba[tmp]);
		DBF_DEV_EVENT(DBF_INFO, irq, "rSBA%d:%lx", tmp,
			      (unsigned long) q->sbal[i]->element[tmp].addr);
		DBF_DEV_EVENT(DBF_INFO, irq, "DC%d:%u", tmp, aob->dcount[tmp]);
		DBF_DEV_EVENT(DBF_INFO, irq, "rDC%d:%u", tmp,
			      q->sbal[i]->element[tmp].length);
	}
	DBF_DEV_EVENT(DBF_INFO, irq, "USER0:%lx", (unsigned long) aob->user0);
	for (tmp = 0; tmp < 2; ++tmp) {
		DBF_DEV_EVENT(DBF_INFO, irq, "RES4%d:%lx", tmp,
			      (unsigned long) aob->res4[tmp]);
	}
	DBF_DEV_EVENT(DBF_INFO, irq, "USER1:%lx", (unsigned long) aob->user1);
	DBF_DEV_EVENT(DBF_INFO, irq, "USER2:%lx", (unsigned long) aob->user2);
}
659
660static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
661{
662 unsigned char state = 0;
663 int j, b = start;
664
665 if (!contains_aobs(q))
666 return;
667
668 for (j = 0; j < count; ++j) {
669 get_buf_state(q, b, &state, 0);
670 if (state == SLSB_P_OUTPUT_PENDING) {
671 struct qaob *aob = q->u.out.aobs[b];
672 if (aob == NULL)
673 continue;
674
675 BUG_ON(q->u.out.sbal_state == NULL);
676 q->u.out.sbal_state[b].flags |=
677 QDIO_OUTBUF_STATE_FLAG_PENDING;
678 q->u.out.aobs[b] = NULL;
679 } else if (state == SLSB_P_OUTPUT_EMPTY) {
680 BUG_ON(q->u.out.sbal_state == NULL);
681 q->u.out.sbal_state[b].aob = NULL;
682 }
683 b = next_buf(b);
684 }
685}
686
/*
 * Prepare (allocating on demand) the AOB for an outbound buffer.
 * Returns the AOB's physical address, or 0 when the queue does not use the
 * completion queue feature or no AOB could be allocated.
 */
static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
					int bufnr)
{
	unsigned long phys_aob = 0;

	if (!q->use_cq)
		goto out;

	if (!q->aobs[bufnr]) {
		/* may return NULL; handled by the check below */
		struct qaob *aob = qdio_allocate_aob();
		q->aobs[bufnr] = aob;
	}
	if (q->aobs[bufnr]) {
		BUG_ON(q->sbal_state == NULL);
		q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
		q->sbal_state[bufnr].aob = q->aobs[bufnr];
		q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
		phys_aob = virt_to_phys(q->aobs[bufnr]);
		/* the AOB address must be 256-byte aligned (low byte clear) */
		BUG_ON(phys_aob & 0xFF);
	}

out:
	return phys_aob;
}
711
/* deliver the buffers between first_to_kick and first_to_check to the
 * upper-layer handler and reset the accumulated error state */
static void qdio_kick_handler(struct qdio_q *q)
{
	int start = q->first_to_kick;
	int end = q->first_to_check;
	int count;

	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	/* wrap-around safe distance between kick and check frontiers */
	count = sub_buf(end, start);

	if (q->is_input_q) {
		qperf_inc(q, inbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
	} else {
		qperf_inc(q, outbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
			      start, count);
	}

	/* process PENDING/EMPTY transitions before notifying the handler */
	qdio_handle_aobs(q, start, count);

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
		   q->irq_ptr->int_parm);

	/* for the next time */
	q->first_to_kick = end;
	q->qdio_error = 0;
}
741
/* tasklet body for input queues: scan, kick the handler, manage polling */
static void __qdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		/* means poll time is not yet over */
		qperf_inc(q, tasklet_inbound_resched);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
			tasklet_schedule(&q->tasklet);
			return;
		}
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}
771
/* inbound tasklet entry point; data carries the queue pointer */
void qdio_inbound_processing(unsigned long data)
{
	__qdio_inbound_processing((struct qdio_q *)data);
}
777
/*
 * Scan the outbound queue's SLSB entries starting at first_to_check and
 * advance the frontier over buffers the adapter has finished with.
 * Returns the (possibly advanced) first_to_check index.
 */
static int get_outbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state = 0;

	/* sync SLSB with the hardware first where the queue type requires it */
	if (need_siga_sync(q))
		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
		    !pci_out_supported(q)) ||
		    (queue_type(q) == QDIO_IQDIO_QFMT &&
		    multicast_outbound(q)))
			qdio_siga_sync_q(q);

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);
	if (q->first_to_check == stop)
		goto out;

	/* fetch the state of the run of buffers beginning at the frontier */
	count = get_buf_states(q, q->first_to_check, &state, count, 0, 1);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_OUTPUT_PENDING:
		/* PENDING must not be reported here; handled via AOBs */
		BUG();
	case SLSB_P_OUTPUT_EMPTY:
		/* the adapter got it */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
			"out empty:%1d %02x", q->nr, count);

		atomic_sub(count, &q->nr_buf_used);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);

		break;
	case SLSB_P_OUTPUT_ERROR:
		/* log the error, then consume the buffers like EMPTY */
		process_buffer_error(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
			      q->nr);
		break;
	case SLSB_P_OUTPUT_NOT_INIT:
	case SLSB_P_OUTPUT_HALTED:
		/* frontier stays put; nothing to hand back */
		break;
	default:
		BUG();
	}

out:
	return q->first_to_check;
}
841
842/* all buffers processed? */
843static inline int qdio_outbound_q_done(struct qdio_q *q)
844{
845 return atomic_read(&q->nr_buf_used) == 0;
846}
847
848static inline int qdio_outbound_q_moved(struct qdio_q *q)
849{
850 int bufnr;
851
852 bufnr = get_outbound_buffer_frontier(q);
853
Jan Glaubere85dea02009-03-26 15:24:29 +0100854 if ((bufnr != q->last_move) || q->qdio_error) {
855 q->last_move = bufnr;
Jan Glauber22f99342008-12-25 13:38:46 +0100856 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
Jan Glauber779e6e12008-07-17 17:16:48 +0200857 return 1;
858 } else
859 return 0;
860}
861
/*
 * Signal the adapter (SIGA-w) that new outbound buffers are available.
 * Retries up to QDIO_BUSY_BIT_RETRIES times while the busy bit is set.
 * Returns 0, or the SIGA condition code possibly OR'ed with
 * QDIO_ERROR_SIGA_BUSY when the busy bit never cleared.
 */
static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
{
	int retries = 0, cc;
	unsigned int busy_bit;

	/* queue types that don't need SIGA-w skip the signal entirely */
	if (!need_siga_out(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
retry:
	qperf_inc(q, siga_write);

	cc = qdio_siga_output(q, &busy_bit, aob);
	switch (cc) {
	case 0:
		break;
	case 2:
		if (busy_bit) {
			/*
			 * Effectively an "if": the goto re-issues the SIGA
			 * after a delay until the retry budget is exhausted.
			 */
			while (++retries < QDIO_BUSY_BIT_RETRIES) {
				mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
				goto retry;
			}
			DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
			cc |= QDIO_ERROR_SIGA_BUSY;
		} else
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
		break;
	case 1:
	case 3:
		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
		break;
	}
	/* note in the error log how often we had to retry */
	if (retries) {
		DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
		DBF_ERROR("count:%u", retries);
	}
	return cc;
}
900
/*
 * Core outbound processing, run from the outbound tasklet: deliver
 * completed buffers and decide between rescheduling, bailing out, or
 * arming the polling timer depending on the queue type.
 */
static void __qdio_outbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_outbound);
	BUG_ON(atomic_read(&q->nr_buf_used) < 0);

	if (qdio_outbound_q_moved(q))
		qdio_kick_handler(q);

	/* zfcp without PCI support must poll until all buffers are done */
	if (queue_type(q) == QDIO_ZFCP_QFMT)
		if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
			goto sched;

	/* bail out for HiperSockets unicast queues */
	if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
		return;

	/* heavily loaded HiperSockets multicast: keep polling via tasklet */
	if ((queue_type(q) == QDIO_IQDIO_QFMT) &&
	    (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL)
		goto sched;

	/* with PCI interrupts enabled the adapter will notify us itself */
	if (q->u.out.pci_out_enabled)
		return;

	/*
	 * Now we know that queue type is either qeth without pci enabled
	 * or HiperSockets multicast. Make sure buffer switch from PRIMED to
	 * EMPTY is noticed and outbound_handler is called after some time.
	 */
	if (qdio_outbound_q_done(q))
		del_timer(&q->u.out.timer);
	else
		if (!timer_pending(&q->u.out.timer))
			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
	return;

sched:
	/* never reschedule once shutdown marked the device STOPPED */
	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}
941
/* outbound tasklet entry point; data carries the queue pointer */
void qdio_outbound_processing(unsigned long data)
{
	__qdio_outbound_processing((struct qdio_q *)data);
}
948
949void qdio_outbound_timer(unsigned long data)
950{
951 struct qdio_q *q = (struct qdio_q *)data;
Jan Glauberc38f9602009-03-26 15:24:26 +0100952
953 if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
954 return;
Jan Glauber779e6e12008-07-17 17:16:48 +0200955 tasklet_schedule(&q->tasklet);
956}
957
Jan Glauber60b5df22009-06-22 12:08:10 +0200958static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
Jan Glauber779e6e12008-07-17 17:16:48 +0200959{
960 struct qdio_q *out;
961 int i;
962
963 if (!pci_out_supported(q))
964 return;
965
966 for_each_output_queue(q->irq_ptr, out, i)
967 if (!qdio_outbound_q_done(out))
968 tasklet_schedule(&out->tasklet);
969}
970
/*
 * Inbound processing for thinint (adapter-interrupt) queues. Mirrors
 * __qdio_inbound_processing but additionally syncs the queue and checks
 * PCI-capable outbound queues, since the adapter interrupt is shared.
 */
static void __tiqdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);
	if (need_siga_sync(q) && need_siga_sync_after_ai(q))
		qdio_sync_queues(q);

	/*
	 * The interrupt could be caused by a PCI request. Check the
	 * PCI capable outbound queues.
	 */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched);
		/* keep polling unless the device is shutting down */
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
			tasklet_schedule(&q->tasklet);
			return;
		}
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}
1007
/* thinint inbound tasklet entry point; data carries the queue pointer */
void tiqdio_inbound_processing(unsigned long data)
{
	__tiqdio_inbound_processing((struct qdio_q *)data);
}
1013
/* set the irq state and make the store visible before later accesses */
static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

	irq_ptr->state = state;
	/* full memory barrier: other CPUs poll irq_ptr->state */
	mb();
}
1022
Jan Glauber22f99342008-12-25 13:38:46 +01001023static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
Jan Glauber779e6e12008-07-17 17:16:48 +02001024{
Jan Glauber779e6e12008-07-17 17:16:48 +02001025 if (irb->esw.esw0.erw.cons) {
Jan Glauber22f99342008-12-25 13:38:46 +01001026 DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
1027 DBF_ERROR_HEX(irb, 64);
1028 DBF_ERROR_HEX(irb->ecw, 64);
Jan Glauber779e6e12008-07-17 17:16:48 +02001029 }
1030}
1031
/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

	if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;

	for_each_input_queue(irq_ptr, q, i) {
		if (q->u.in.queue_start_poll) {
			/* skip if polling is enabled or already in work */
			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
				     &q->u.in.queue_irq_state)) {
				qperf_inc(q, int_discarded);
				continue;
			}
			/* hand the queue to the upper layer's poll callback */
			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
						 q->irq_ptr->int_parm);
		} else {
			tasklet_schedule(&q->tasklet);
		}
	}

	/*
	 * NOTE(review): q here is the last input queue from the loop above;
	 * pci_out_supported() presumably only reads per-device data via
	 * q->irq_ptr, so any queue works — confirm against its definition.
	 */
	if (!pci_out_supported(q))
		return;

	for_each_output_queue(irq_ptr, q, i) {
		if (qdio_outbound_q_done(q))
			continue;
		if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
			qdio_siga_sync_q(q);
		tasklet_schedule(&q->tasklet);
	}
}
1067
/*
 * Handle a check condition while the subchannel is active: log it,
 * notify the upper layer through any available queue handler, and stop
 * the device.
 */
static void qdio_handle_activate_check(struct ccw_device *cdev,
				unsigned long intparm, int cstat, int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;

	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
	DBF_ERROR("intp :%lx", intparm);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	/* pick any queue just to reach the registered handler */
	if (irq_ptr->nr_input_qs) {
		q = irq_ptr->input_qs[0];
	} else if (irq_ptr->nr_output_qs) {
		q = irq_ptr->output_qs[0];
	} else {
		/* no queues at all: nothing to notify, just complain */
		dump_stack();
		goto no_handler;
	}
	/* -1/-1 buffer indexes mark this as an error notification */
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
		   0, -1, -1, irq_ptr->int_parm);
no_handler:
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
}
1091
Jan Glauber779e6e12008-07-17 17:16:48 +02001092static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
1093 int dstat)
1094{
1095 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
Jan Glauber779e6e12008-07-17 17:16:48 +02001096
Jan Glauber22f99342008-12-25 13:38:46 +01001097 DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
Jan Glauber4c575422009-06-12 10:26:28 +02001098
1099 if (cstat)
1100 goto error;
1101 if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
1102 goto error;
1103 if (!(dstat & DEV_STAT_DEV_END))
1104 goto error;
1105 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
1106 return;
1107
1108error:
1109 DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
1110 DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
1111 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
Jan Glauber779e6e12008-07-17 17:16:48 +02001112}
1113
/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int cstat, dstat;

	/* interrupt for a device we never set up — log and ignore */
	if (!intparm || !irq_ptr) {
		DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
		return;
	}

	kstat_cpu(smp_processor_id()).irqs[IOINT_QDI]++;
	if (irq_ptr->perf_stat_enabled)
		irq_ptr->perf_stat.qdio_int++;

	/* cio passes error conditions as an ERR_PTR-encoded irb */
	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
			qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
			wake_up(&cdev->private->wait_q);
			return;
		default:
			WARN_ON(1);
			return;
		}
	}
	qdio_irq_check_sense(irq_ptr, irb);
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	/* dispatch on the device's life-cycle state */
	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		/* this must be the establish completion */
		qdio_establish_handle_irq(cdev, cstat, dstat);
		break;
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_int_handler_pci(irq_ptr);
			/* PCI path does not wake waiters */
			return;
		}
		if (cstat || dstat)
			qdio_handle_activate_check(cdev, intparm, cstat,
						   dstat);
		break;
	case QDIO_IRQ_STATE_STOPPED:
		break;
	default:
		WARN_ON(1);
	}
	/* release anyone waiting for a state transition */
	wake_up(&cdev->private->wait_q);
}
1170
/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code (-EINVAL when @cdev is missing or has no
 * private data). The results of the chsc are stored in the
 * specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
		       struct qdio_ssqd_desc *data)
{

	if (!cdev || !cdev->private)
		return -EINVAL;

	DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
	/* issue the chsc against this subchannel; result lands in *data */
	return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
1190
Jan Glauber779e6e12008-07-17 17:16:48 +02001191static void qdio_shutdown_queues(struct ccw_device *cdev)
1192{
1193 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1194 struct qdio_q *q;
1195 int i;
1196
1197 for_each_input_queue(irq_ptr, q, i)
Jan Glauberc38f9602009-03-26 15:24:26 +01001198 tasklet_kill(&q->tasklet);
Jan Glauber779e6e12008-07-17 17:16:48 +02001199
1200 for_each_output_queue(irq_ptr, q, i) {
Jan Glauber779e6e12008-07-17 17:16:48 +02001201 del_timer(&q->u.out.timer);
Jan Glauberc38f9602009-03-26 15:24:26 +01001202 tasklet_kill(&q->tasklet);
Jan Glauber779e6e12008-07-17 17:16:48 +02001203 }
1204}
1205
/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 *
 * Returns 0, -ENODEV when no qdio data is attached, or the error from
 * the halt/clear request. Safe to call repeatedly: a second call on an
 * inactive device returns 0 immediately.
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int rc;
	unsigned long flags;

	if (!irq_ptr)
		return -ENODEV;

	/* we sleep below (mutex, wait_event), so irqs must be enabled */
	BUG_ON(irqs_disabled());
	DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);

	mutex_lock(&irq_ptr->setup_mutex);
	/*
	 * Subchannel was already shot down. We cannot prevent being called
	 * twice since cio may trigger a shutdown asynchronously.
	 */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		mutex_unlock(&irq_ptr->setup_mutex);
		return 0;
	}

	/*
	 * Indicate that the device is going down. Scheduling the queue
	 * tasklets is forbidden from here on.
	 */
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);

	tiqdio_remove_input_queues(irq_ptr);
	qdio_shutdown_queues(cdev);
	qdio_shutdown_debug_entries(irq_ptr, cdev);

	/* cleanup subchannel */
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
	else
		/* default behaviour is halt */
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
	if (rc) {
		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4d", rc);
		goto no_cleanup;
	}

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
	/* drop the lock while waiting for the interrupt to finish cleanup */
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR,
		10 * HZ);
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

no_cleanup:
	qdio_shutdown_thinint(irq_ptr);

	/* restore interrupt handler */
	if ((void *)cdev->handler == (void *)qdio_int_handler)
		cdev->handler = irq_ptr->orig_handler;
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	if (rc)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);
1280
1281/**
1282 * qdio_free - free data structures for a qdio subchannel
1283 * @cdev: associated ccw device
1284 */
1285int qdio_free(struct ccw_device *cdev)
1286{
Jan Glauber22f99342008-12-25 13:38:46 +01001287 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
Jan Glauber779e6e12008-07-17 17:16:48 +02001288
Jan Glauber779e6e12008-07-17 17:16:48 +02001289 if (!irq_ptr)
1290 return -ENODEV;
1291
Jan Glauber22f99342008-12-25 13:38:46 +01001292 DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
Jan Glauber779e6e12008-07-17 17:16:48 +02001293 mutex_lock(&irq_ptr->setup_mutex);
Jan Glauber22f99342008-12-25 13:38:46 +01001294
1295 if (irq_ptr->debug_area != NULL) {
1296 debug_unregister(irq_ptr->debug_area);
1297 irq_ptr->debug_area = NULL;
1298 }
Jan Glauber779e6e12008-07-17 17:16:48 +02001299 cdev->private->qdio_data = NULL;
1300 mutex_unlock(&irq_ptr->setup_mutex);
1301
1302 qdio_release_memory(irq_ptr);
1303 return 0;
1304}
1305EXPORT_SYMBOL_GPL(qdio_free);
1306
/**
 * qdio_allocate - allocate qdio queues and associated data
 * @init_data: initialization data
 *
 * Returns 0, -EINVAL on inconsistent @init_data, or -ENOMEM when any of
 * the required pages or queue structures cannot be allocated.
 */
int qdio_allocate(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;

	DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);

	/* queues without a matching handler make no sense */
	if ((init_data->no_input_qs && !init_data->input_handler) ||
	    (init_data->no_output_qs && !init_data->output_handler))
		return -EINVAL;

	if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
	    (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
		return -EINVAL;

	if ((!init_data->input_sbal_addr_array) ||
	    (!init_data->output_sbal_addr_array))
		return -EINVAL;

	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr)
		goto out_err;

	mutex_init(&irq_ptr->setup_mutex);
	qdio_allocate_dbf(init_data, irq_ptr);

	/*
	 * Allocate a page for the chsc calls in qdio_establish.
	 * Must be pre-allocated since a zfcp recovery will call
	 * qdio_establish. In case of low memory and swap on a zfcp disk
	 * we may not be able to allocate memory otherwise.
	 */
	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr->chsc_page)
		goto out_rel;

	/* qdr is used in ccw1.cda which is u32 */
	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->qdr)
		goto out_rel;
	/* the qdr must be page aligned */
	WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);

	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
			     init_data->no_output_qs))
		goto out_rel;

	init_data->cdev->private->qdio_data = irq_ptr;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	return 0;
out_rel:
	/* releases everything hung off irq_ptr, including partial allocs */
	qdio_release_memory(irq_ptr);
out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(qdio_allocate);
1366
frank.blaschka@de.ibm.com104ea552011-08-08 01:33:55 +00001367static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
1368{
1369 struct qdio_q *q = irq_ptr->input_qs[0];
1370 int i, use_cq = 0;
1371
1372 if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
1373 use_cq = 1;
1374
1375 for_each_output_queue(irq_ptr, q, i) {
1376 if (use_cq) {
1377 if (qdio_enable_async_operation(&q->u.out) < 0) {
1378 use_cq = 0;
1379 continue;
1380 }
1381 } else
1382 qdio_disable_async_operation(&q->u.out);
1383 }
1384 DBF_EVENT("use_cq:%d", use_cq);
1385}
1386
/**
 * qdio_establish - establish queues on a qdio subchannel
 * @init_data: initialization data
 *
 * Returns 0, -ENODEV/-EINVAL for a missing or offline device, -EIO when
 * the establish did not complete, or the error from thinint setup /
 * ccw_device_start. On any failure the subchannel is shut down again.
 */
int qdio_establish(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;
	struct ccw_device *cdev = init_data->cdev;
	unsigned long saveflags;
	int rc;

	DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_setup_irq(init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	/* establish q */
	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->equeue.count;
	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	if (rc) {
		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	/* the interrupt handler moves us to ESTABLISHED or ERR */
	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}

	qdio_setup_ssqd_info(irq_ptr);
	DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);

	/* configure completion-queue usage for HiperSockets */
	qdio_detect_hsicq(irq_ptr);

	/* qebsm is now setup if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr, cdev);
	qdio_setup_debug_entries(irq_ptr, cdev);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);
1463
/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 *
 * Returns 0, -ENODEV/-EINVAL for a missing or offline device, -EBUSY
 * when the device was not established first, or -EIO when activation
 * left the device in STOPPED/ERR state.
 */
int qdio_activate(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr;
	int rc;
	unsigned long saveflags;

	DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	/* must have been established first */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->aqueue.count;
	irq_ptr->ccw.cda = 0;

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	if (rc) {
		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc)
		goto out;

	if (is_thinint_irq(irq_ptr))
		tiqdio_add_input_queues(irq_ptr);

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		rc = -EIO;
		break;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);
1528
1529static inline int buf_in_between(int bufnr, int start, int count)
1530{
1531 int end = add_buf(start, count);
1532
1533 if (end > start) {
1534 if (bufnr >= start && bufnr < end)
1535 return 1;
1536 else
1537 return 0;
1538 }
1539
1540 /* wrap-around case */
1541 if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
1542 (bufnr < end))
1543 return 1;
1544 else
1545 return 0;
1546}
1547
/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 *
 * Returns 0, or the result of qdio_siga_input() when the adapter had to
 * be signalled.
 */
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
			  int bufnr, int count)
{
	int used, diff;

	qperf_inc(q, inbound_call);

	if (!q->u.in.polling)
		goto set;

	/* protect against stop polling setting an ACK for an emptied slsb */
	if (count == QDIO_MAX_BUFFERS_PER_Q) {
		/* overwriting everything, just delete polling status */
		q->u.in.polling = 0;
		q->u.in.ack_count = 0;
		goto set;
	} else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
		if (is_qebsm(q)) {
			/* partial overwrite, just update ack_start */
			diff = add_buf(bufnr, count);
			diff = sub_buf(diff, q->u.in.ack_start);
			q->u.in.ack_count -= diff;
			if (q->u.in.ack_count <= 0) {
				/* all acked buffers were overwritten */
				q->u.in.polling = 0;
				q->u.in.ack_count = 0;
				goto set;
			}
			q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
		}
		else
			/* the only ACK will be deleted, so stop polling */
			q->u.in.polling = 0;
	}

set:
	/* hand the buffers back to the adapter */
	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);

	used = atomic_add_return(count, &q->nr_buf_used) - count;
	BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);

	/* no need to signal as long as the adapter had free buffers */
	if (used)
		return 0;

	if (need_siga_in(q))
		return qdio_siga_input(q);
	return 0;
}
1603
/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 *
 * Returns 0 on success, otherwise the error from the SIGA instruction
 * issued by qdio_kick_outbound_q() or qdio_siga_sync_q().
 */
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
			   int bufnr, int count)
{
	unsigned char state = 0;
	int used, rc = 0;

	qperf_inc(q, outbound_call);

	/* mark the buffers primed and account for them */
	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
	used = atomic_add_return(count, &q->nr_buf_used);
	BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);

	if (used == QDIO_MAX_BUFFERS_PER_Q)
		qperf_inc(q, outbound_queue_full);

	/* remember whether the caller requested a PCI interrupt */
	if (callflags & QDIO_FLAG_PCI_OUT) {
		q->u.out.pci_out_enabled = 1;
		qperf_inc(q, pci_request_int);
	} else
		q->u.out.pci_out_enabled = 0;

	if (queue_type(q) == QDIO_IQDIO_QFMT) {
		unsigned long phys_aob = 0;

		/* One SIGA-W per buffer required for unicast HSI */
		WARN_ON_ONCE(count > 1 && !multicast_outbound(q));

		phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);

		rc = qdio_kick_outbound_q(q, phys_aob);
	} else if (need_siga_sync(q)) {
		rc = qdio_siga_sync_q(q);
	} else {
		/* try to fast requeue buffers */
		get_buf_state(q, prev_buf(bufnr), &state, 0);
		if (state != SLSB_CU_OUTPUT_PRIMED)
			rc = qdio_kick_outbound_q(q, 0);
		else
			/* previous buffer still primed - adapter will pick
			 * this one up without a new SIGA */
			qperf_inc(q, fast_requeue);
	}

	/* in case of SIGA errors we must process the error immediately */
	if (used >= q->u.out.scan_threshold || rc)
		tasklet_schedule(&q->tasklet);
	else
		/* free the SBALs in case of no further traffic */
		if (!timer_pending(&q->u.out.timer))
			mod_timer(&q->u.out.timer, jiffies + HZ);
	return rc;
}
1661
1662/**
1663 * do_QDIO - process input or output buffers
1664 * @cdev: associated ccw_device for the qdio subchannel
1665 * @callflags: input or output and special flags from the program
1666 * @q_nr: queue number
1667 * @bufnr: buffer number
1668 * @count: how many buffers to process
1669 */
1670int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
Jan Glauber66182412009-06-22 12:08:15 +02001671 int q_nr, unsigned int bufnr, unsigned int count)
Jan Glauber779e6e12008-07-17 17:16:48 +02001672{
1673 struct qdio_irq *irq_ptr;
Jan Glauber779e6e12008-07-17 17:16:48 +02001674
frank.blaschka@de.ibm.com104ea552011-08-08 01:33:55 +00001675
Jan Glauber66182412009-06-22 12:08:15 +02001676 if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
Jan Glauber779e6e12008-07-17 17:16:48 +02001677 return -EINVAL;
1678
Jan Glauber779e6e12008-07-17 17:16:48 +02001679 irq_ptr = cdev->private->qdio_data;
1680 if (!irq_ptr)
1681 return -ENODEV;
1682
Jan Glauber1d7e1502009-09-22 22:58:39 +02001683 DBF_DEV_EVENT(DBF_INFO, irq_ptr,
1684 "do%02x b:%02x c:%02x", callflags, bufnr, count);
Jan Glauber779e6e12008-07-17 17:16:48 +02001685
1686 if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
1687 return -EBUSY;
Jan Glauber9a265132011-03-23 10:16:01 +01001688 if (!count)
1689 return 0;
Jan Glauber779e6e12008-07-17 17:16:48 +02001690 if (callflags & QDIO_FLAG_SYNC_INPUT)
Jan Glauberd303b6f2009-03-26 15:24:31 +01001691 return handle_inbound(irq_ptr->input_qs[q_nr],
1692 callflags, bufnr, count);
Jan Glauber779e6e12008-07-17 17:16:48 +02001693 else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
Jan Glauberd303b6f2009-03-26 15:24:31 +01001694 return handle_outbound(irq_ptr->output_qs[q_nr],
1695 callflags, bufnr, count);
1696 return -EINVAL;
Jan Glauber779e6e12008-07-17 17:16:48 +02001697}
1698EXPORT_SYMBOL_GPL(do_QDIO);
1699
Jan Glauberd36deae2010-09-07 21:14:39 +00001700/**
1701 * qdio_start_irq - process input buffers
1702 * @cdev: associated ccw_device for the qdio subchannel
1703 * @nr: input queue number
1704 *
1705 * Return codes
1706 * 0 - success
1707 * 1 - irqs not started since new data is available
1708 */
1709int qdio_start_irq(struct ccw_device *cdev, int nr)
1710{
1711 struct qdio_q *q;
1712 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1713
1714 if (!irq_ptr)
1715 return -ENODEV;
1716 q = irq_ptr->input_qs[nr];
1717
1718 WARN_ON(queue_irqs_enabled(q));
1719
frank.blaschka@de.ibm.com104ea552011-08-08 01:33:55 +00001720 if (!shared_ind(q))
Jan Glauberd36deae2010-09-07 21:14:39 +00001721 xchg(q->irq_ptr->dsci, 0);
1722
1723 qdio_stop_polling(q);
1724 clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
1725
1726 /*
1727 * We need to check again to not lose initiative after
1728 * resetting the ACK state.
1729 */
frank.blaschka@de.ibm.com104ea552011-08-08 01:33:55 +00001730 if (!shared_ind(q) && *q->irq_ptr->dsci)
Jan Glauberd36deae2010-09-07 21:14:39 +00001731 goto rescan;
1732 if (!qdio_inbound_q_done(q))
1733 goto rescan;
1734 return 0;
1735
1736rescan:
1737 if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
1738 &q->u.in.queue_irq_state))
1739 return 0;
1740 else
1741 return 1;
1742
1743}
1744EXPORT_SYMBOL(qdio_start_irq);
1745
/**
 * qdio_get_next_buffers - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 * @bufnr: first filled buffer number
 * @error: buffers are in error state
 *
 * Return codes
 *   < 0 - error
 *   = 0 - no new buffers found
 *   > 0 - number of processed buffers
 */
int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
			  int *error)
{
	struct qdio_q *q;
	int start, end;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];
	/* caller must have disabled irqs via qdio_start_irq/qdio_stop_irq */
	WARN_ON(queue_irqs_enabled(q));

	/*
	 * Cannot rely on automatic sync after interrupt since queues may
	 * also be examined without interrupt.
	 */
	if (need_siga_sync(q))
		qdio_sync_queues(q);

	/* check the PCI capable outbound queues. */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return 0;

	/* Note: upper-layer MUST stop processing immediately here ... */
	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return -EIO;

	/* report the window [first_to_kick, first_to_check) to the caller */
	start = q->first_to_kick;
	end = q->first_to_check;
	*bufnr = start;
	*error = q->qdio_error;

	/* for the next time */
	q->first_to_kick = end;
	q->qdio_error = 0;
	return sub_buf(end, start);
}
EXPORT_SYMBOL(qdio_get_next_buffers);
1798
1799/**
1800 * qdio_stop_irq - disable interrupt processing for the device
1801 * @cdev: associated ccw_device for the qdio subchannel
1802 * @nr: input queue number
1803 *
1804 * Return codes
1805 * 0 - interrupts were already disabled
1806 * 1 - interrupts successfully disabled
1807 */
1808int qdio_stop_irq(struct ccw_device *cdev, int nr)
1809{
1810 struct qdio_q *q;
1811 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1812
1813 if (!irq_ptr)
1814 return -ENODEV;
1815 q = irq_ptr->input_qs[nr];
1816
1817 if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
1818 &q->u.in.queue_irq_state))
1819 return 0;
1820 else
1821 return 1;
1822}
1823EXPORT_SYMBOL(qdio_stop_irq);
1824
Jan Glauber779e6e12008-07-17 17:16:48 +02001825static int __init init_QDIO(void)
1826{
1827 int rc;
1828
Sebastian Ottaa5c8df2011-04-04 09:43:31 +02001829 rc = qdio_debug_init();
Jan Glauber779e6e12008-07-17 17:16:48 +02001830 if (rc)
1831 return rc;
Sebastian Ottaa5c8df2011-04-04 09:43:31 +02001832 rc = qdio_setup_init();
1833 if (rc)
1834 goto out_debug;
Jan Glauber779e6e12008-07-17 17:16:48 +02001835 rc = tiqdio_allocate_memory();
1836 if (rc)
1837 goto out_cache;
Jan Glauber779e6e12008-07-17 17:16:48 +02001838 rc = tiqdio_register_thinints();
1839 if (rc)
Sebastian Ottaa5c8df2011-04-04 09:43:31 +02001840 goto out_ti;
Jan Glauber779e6e12008-07-17 17:16:48 +02001841 return 0;
1842
Jan Glauber779e6e12008-07-17 17:16:48 +02001843out_ti:
1844 tiqdio_free_memory();
1845out_cache:
1846 qdio_setup_exit();
Sebastian Ottaa5c8df2011-04-04 09:43:31 +02001847out_debug:
1848 qdio_debug_exit();
Jan Glauber779e6e12008-07-17 17:16:48 +02001849 return rc;
1850}
1851
/* Module exit: tear down in strict reverse order of init_QDIO(). */
static void __exit exit_QDIO(void)
{
	tiqdio_unregister_thinints();
	tiqdio_free_memory();
	qdio_setup_exit();
	qdio_debug_exit();
}
1859
/* Register module entry and exit points with the kernel module loader. */
module_init(init_QDIO);
module_exit(exit_QDIO);