/*
 *
 * Author	Karsten Keil <kkeil@novell.com>
 *
 * Copyright 2008  by Karsten Keil <kkeil@novell.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/mISDNhw.h>

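/*
 * D-channel bottom half, run from the workqueue: drain the receive
 * queue and hand each skb to the upper-layer peer (freeing frames that
 * cannot be delivered), then run the PH-state-change callback if
 * FLG_PHCHANGE was set.
 */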
static void
dchannel_bh(struct work_struct *ws)
{
	struct dchannel	*dch = container_of(ws, struct dchannel, workq);
	struct sk_buff	*skb;
	int		err;

	if (test_and_clear_bit(FLG_RECVQUEUE, &dch->Flags)) {
		while ((skb = skb_dequeue(&dch->rqueue))) {
			if (likely(dch->dev.D.peer)) {
				err = dch->dev.D.recv(dch->dev.D.peer, skb);
				if (err)
					dev_kfree_skb(skb);
			} else
				dev_kfree_skb(skb);
		}
	}
	if (test_and_clear_bit(FLG_PHCHANGE, &dch->Flags)) {
		if (dch->phfunc)
			dch->phfunc(dch);
	}
}

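/*
 * B-channel bottom half, run from the workqueue: drain the receive
 * queue, decrement the pending-frame counter and deliver each skb to
 * the peer, freeing frames that cannot be delivered.
 */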
static void
bchannel_bh(struct work_struct *ws)
{
	struct bchannel	*bch = container_of(ws, struct bchannel, workq);
	struct sk_buff	*skb;
	int		err;

	if (test_and_clear_bit(FLG_RECVQUEUE, &bch->Flags)) {
		while ((skb = skb_dequeue(&bch->rqueue))) {
			bch->rcount--;
			if (likely(bch->ch.peer)) {
				err = bch->ch.recv(bch->ch.peer, skb);
				if (err)
					dev_kfree_skb(skb);
			} else
				dev_kfree_skb(skb);
		}
	}
}

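/*
 * Initialize a D-channel: mark it as HDLC, reset the buffer state,
 * set up the send/receive queues and attach the workqueue handler.
 */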
int
mISDN_initdchannel(struct dchannel *ch, int maxlen, void *phf)
{
	test_and_set_bit(FLG_HDLC, &ch->Flags);
	ch->maxlen = maxlen;
	ch->hw = NULL;
	ch->rx_skb = NULL;
	ch->tx_skb = NULL;
	ch->tx_idx = 0;
	ch->phfunc = phf;
	skb_queue_head_init(&ch->squeue);
	skb_queue_head_init(&ch->rqueue);
	INIT_LIST_HEAD(&ch->dev.bchannels);
	INIT_WORK(&ch->workq, dchannel_bh);
	return 0;
}
EXPORT_SYMBOL(mISDN_initdchannel);

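/*
 * Initialize a B-channel: clear all flags and buffer state, set the
 * maximum frame length and attach the workqueue handler.
 */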
int
mISDN_initbchannel(struct bchannel *ch, int maxlen)
{
	ch->Flags = 0;
	ch->maxlen = maxlen;
	ch->hw = NULL;
	ch->rx_skb = NULL;
	ch->tx_skb = NULL;
	ch->tx_idx = 0;
	skb_queue_head_init(&ch->rqueue);
	ch->rcount = 0;
	ch->next_skb = NULL;
	INIT_WORK(&ch->workq, bchannel_bh);
	return 0;
}
EXPORT_SYMBOL(mISDN_initbchannel);

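/*
 * Release a D-channel: free pending tx/rx buffers, purge both queues
 * and wait for the workqueue handler to finish.
 */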
int
mISDN_freedchannel(struct dchannel *ch)
{
	if (ch->tx_skb) {
		dev_kfree_skb(ch->tx_skb);
		ch->tx_skb = NULL;
	}
	if (ch->rx_skb) {
		dev_kfree_skb(ch->rx_skb);
		ch->rx_skb = NULL;
	}
	skb_queue_purge(&ch->squeue);
	skb_queue_purge(&ch->rqueue);
	flush_work_sync(&ch->workq);
	return 0;
}
EXPORT_SYMBOL(mISDN_freedchannel);

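/*
 * Reset a B-channel: drop any pending tx, rx and next_skb buffers and
 * clear the TX_BUSY, TX_NEXT and ACTIVE flags.
 */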
void
mISDN_clear_bchannel(struct bchannel *ch)
{
	if (ch->tx_skb) {
		dev_kfree_skb(ch->tx_skb);
		ch->tx_skb = NULL;
	}
	ch->tx_idx = 0;
	if (ch->rx_skb) {
		dev_kfree_skb(ch->rx_skb);
		ch->rx_skb = NULL;
	}
	if (ch->next_skb) {
		dev_kfree_skb(ch->next_skb);
		ch->next_skb = NULL;
	}
	test_and_clear_bit(FLG_TX_BUSY, &ch->Flags);
	test_and_clear_bit(FLG_TX_NEXT, &ch->Flags);
	test_and_clear_bit(FLG_ACTIVE, &ch->Flags);
}
EXPORT_SYMBOL(mISDN_clear_bchannel);

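/*
 * Release a B-channel: clear it, purge the receive queue and wait for
 * the workqueue handler to finish.
 */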
int
mISDN_freebchannel(struct bchannel *ch)
{
	mISDN_clear_bchannel(ch);
	skb_queue_purge(&ch->rqueue);
	ch->rcount = 0;
	flush_work_sync(&ch->workq);
	return 0;
}
EXPORT_SYMBOL(mISDN_freebchannel);

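/*
 * Extract SAPI and TEI from the two LAPD address bytes; the result has
 * the SAPI in bits 0-7 and the TEI in bits 8-15.
 */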
static inline u_int
get_sapi_tei(u_char *p)
{
	u_int	sapi, tei;

	sapi = *p >> 2;
	tei = p[1] >> 1;
	return sapi | (tei << 8);
}

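/*
 * Queue a received D-channel frame as PH_DATA_IND (id = SAPI/TEI) and
 * schedule the bottom half; frames shorter than the 2-byte address
 * field are dropped.
 */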
void
recv_Dchannel(struct dchannel *dch)
{
	struct mISDNhead *hh;

	if (dch->rx_skb->len < 2) { /* at least 2 for sapi / tei */
		dev_kfree_skb(dch->rx_skb);
		dch->rx_skb = NULL;
		return;
	}
	hh = mISDN_HEAD_P(dch->rx_skb);
	hh->prim = PH_DATA_IND;
	hh->id = get_sapi_tei(dch->rx_skb->data);
	skb_queue_tail(&dch->rqueue, dch->rx_skb);
	dch->rx_skb = NULL;
	schedule_event(dch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Dchannel);

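/*
 * Queue a frame received on the E-channel as PH_DATA_E_IND on the
 * associated D-channel's receive queue and schedule its bottom half;
 * frames shorter than the 2-byte address field are dropped.
 */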
void
recv_Echannel(struct dchannel *ech, struct dchannel *dch)
{
	struct mISDNhead *hh;

	if (ech->rx_skb->len < 2) { /* at least 2 for sapi / tei */
		dev_kfree_skb(ech->rx_skb);
		ech->rx_skb = NULL;
		return;
	}
	hh = mISDN_HEAD_P(ech->rx_skb);
	hh->prim = PH_DATA_E_IND;
	hh->id = get_sapi_tei(ech->rx_skb->data);
	skb_queue_tail(&dch->rqueue, ech->rx_skb);
	ech->rx_skb = NULL;
	schedule_event(dch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Echannel);

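/*
 * Queue a received B-channel frame as PH_DATA_IND with the given id
 * and schedule the bottom half; if 64 frames are already pending, the
 * receive queue is flushed instead.
 */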
void
recv_Bchannel(struct bchannel *bch, unsigned int id)
{
	struct mISDNhead *hh;

	hh = mISDN_HEAD_P(bch->rx_skb);
	hh->prim = PH_DATA_IND;
	hh->id = id;
	if (bch->rcount >= 64) {
		printk(KERN_WARNING "B-channel %p receive queue overflow, "
			"flushing!\n", bch);
		skb_queue_purge(&bch->rqueue);
		bch->rcount = 0;
		return;
	}
	bch->rcount++;
	skb_queue_tail(&bch->rqueue, bch->rx_skb);
	bch->rx_skb = NULL;
	schedule_event(bch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Bchannel);

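/* Queue an already prepared skb on the D-channel receive queue. */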
void
recv_Dchannel_skb(struct dchannel *dch, struct sk_buff *skb)
{
	skb_queue_tail(&dch->rqueue, skb);
	schedule_event(dch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Dchannel_skb);

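/*
 * Queue an already prepared skb on the B-channel receive queue,
 * flushing the queue first if 64 frames are already pending.
 */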
void
recv_Bchannel_skb(struct bchannel *bch, struct sk_buff *skb)
{
	if (bch->rcount >= 64) {
		printk(KERN_WARNING "B-channel %p receive queue overflow, "
			"flushing!\n", bch);
		skb_queue_purge(&bch->rqueue);
		bch->rcount = 0;
	}
	bch->rcount++;
	skb_queue_tail(&bch->rqueue, skb);
	schedule_event(bch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Bchannel_skb);

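/*
 * Queue a PH_DATA_CNF confirmation for the current D-channel tx_skb so
 * the bottom half can deliver it upstream.
 */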
static void
confirm_Dsend(struct dchannel *dch)
{
	struct sk_buff	*skb;

	skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(dch->tx_skb),
		0, NULL, GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "%s: no skb id %x\n", __func__,
			mISDN_HEAD_ID(dch->tx_skb));
		return;
	}
	skb_queue_tail(&dch->rqueue, skb);
	schedule_event(dch, FLG_RECVQUEUE);
}

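/*
 * Dequeue the next D-channel frame to transmit: returns 1 (and queues
 * a PH_DATA_CNF for it) if a frame is available, otherwise clears
 * FLG_TX_BUSY and returns 0.
 */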
int
get_next_dframe(struct dchannel *dch)
{
	dch->tx_idx = 0;
	dch->tx_skb = skb_dequeue(&dch->squeue);
	if (dch->tx_skb) {
		confirm_Dsend(dch);
		return 1;
	}
	dch->tx_skb = NULL;
	test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
	return 0;
}
EXPORT_SYMBOL(get_next_dframe);

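/*
 * Queue a PH_DATA_CNF confirmation for the current B-channel tx_skb on
 * the receive queue (flushing it first on overflow) and schedule the
 * bottom half.
 */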
void
confirm_Bsend(struct bchannel *bch)
{
	struct sk_buff	*skb;

	if (bch->rcount >= 64) {
		printk(KERN_WARNING "B-channel %p receive queue overflow, "
			"flushing!\n", bch);
		skb_queue_purge(&bch->rqueue);
		bch->rcount = 0;
	}
	skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(bch->tx_skb),
		0, NULL, GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "%s: no skb id %x\n", __func__,
			mISDN_HEAD_ID(bch->tx_skb));
		return;
	}
	bch->rcount++;
	skb_queue_tail(&bch->rqueue, skb);
	schedule_event(bch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(confirm_Bsend);

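/*
 * Move a pending next_skb to tx_skb: returns 1 if a frame is now ready
 * (confirming it unless the channel is in transparent mode), otherwise
 * clears FLG_TX_BUSY and returns 0.
 */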
int
get_next_bframe(struct bchannel *bch)
{
	bch->tx_idx = 0;
	if (test_bit(FLG_TX_NEXT, &bch->Flags)) {
		bch->tx_skb = bch->next_skb;
		if (bch->tx_skb) {
			bch->next_skb = NULL;
			test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
			if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
				confirm_Bsend(bch); /* not for transparent */
			return 1;
		} else {
			test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
			printk(KERN_WARNING "B TX_NEXT without skb\n");
		}
	}
	bch->tx_skb = NULL;
	test_and_clear_bit(FLG_TX_BUSY, &bch->Flags);
	return 0;
}
EXPORT_SYMBOL(get_next_bframe);

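/*
 * Send a frame to the channel's peer with the given primitive and id;
 * without an skb a zero-length message is queued, and frames that
 * cannot be delivered are freed.
 */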
void
queue_ch_frame(struct mISDNchannel *ch, u_int pr, int id, struct sk_buff *skb)
{
	struct mISDNhead *hh;

	if (!skb) {
		_queue_data(ch, pr, id, 0, NULL, GFP_ATOMIC);
	} else {
		if (ch->peer) {
			hh = mISDN_HEAD_P(skb);
			hh->prim = pr;
			hh->id = id;
			if (!ch->recv(ch->peer, skb))
				return;
		}
		dev_kfree_skb(skb);
	}
}
EXPORT_SYMBOL(queue_ch_frame);

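/*
 * Start D-channel transmission with the hardware lock held: returns 1
 * if the caller should write the frame to the FIFO now, 0 if it was
 * queued behind a busy transmitter, or -EINVAL on a bad length.
 */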
int
dchannel_senddata(struct dchannel *ch, struct sk_buff *skb)
{
	/* check oversize */
	if (skb->len <= 0) {
		printk(KERN_WARNING "%s: skb too small\n", __func__);
		return -EINVAL;
	}
	if (skb->len > ch->maxlen) {
		printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
			__func__, skb->len, ch->maxlen);
		return -EINVAL;
	}
	/* HW lock must be obtained */
	if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
		skb_queue_tail(&ch->squeue, skb);
		return 0;
	} else {
		/* write to fifo */
		ch->tx_skb = skb;
		ch->tx_idx = 0;
		return 1;
	}
}
EXPORT_SYMBOL(dchannel_senddata);

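/*
 * Start B-channel transmission with the hardware lock held: returns 1
 * if the caller should write the frame to the FIFO now, 0 if it was
 * stored as next_skb, -EBUSY if a next_skb is already pending, or
 * -EINVAL on a bad length.
 */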
int
bchannel_senddata(struct bchannel *ch, struct sk_buff *skb)
{

	/* check oversize */
	if (skb->len <= 0) {
		printk(KERN_WARNING "%s: skb too small\n", __func__);
		return -EINVAL;
	}
	if (skb->len > ch->maxlen) {
		printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
			__func__, skb->len, ch->maxlen);
		return -EINVAL;
	}
	/* HW lock must be obtained */
	/* check for pending next_skb */
	if (ch->next_skb) {
		printk(KERN_WARNING
			"%s: next_skb exist ERROR (skb->len=%d next_skb->len=%d)\n",
			__func__, skb->len, ch->next_skb->len);
		return -EBUSY;
	}
	if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
		test_and_set_bit(FLG_TX_NEXT, &ch->Flags);
		ch->next_skb = skb;
		return 0;
	} else {
		/* write to fifo */
		ch->tx_skb = skb;
		ch->tx_idx = 0;
		return 1;
	}
}
EXPORT_SYMBOL(bchannel_senddata);