/*
 * Copyright (C) ST-Ericsson AB 2010
 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
 * Author:  Daniel Martensson / daniel.martensson@stericsson.com
 *	    Dmitry.Tarnyagin  / dmitry.tarnyagin@stericsson.com
 * License terms: GNU General Public License (GPL) version 2.
 */
8
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00009#include <linux/init.h>
10#include <linux/module.h>
11#include <linux/device.h>
12#include <linux/platform_device.h>
13#include <linux/netdevice.h>
14#include <linux/string.h>
15#include <linux/list.h>
16#include <linux/interrupt.h>
17#include <linux/delay.h>
18#include <linux/sched.h>
19#include <linux/if_arp.h>
20#include <linux/timer.h>
Daniel Martensson5bbed922011-10-13 11:29:28 +000021#include <linux/rtnetlink.h>
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +000022#include <net/caif/caif_layer.h>
23#include <net/caif/caif_hsi.h>
24
25MODULE_LICENSE("GPL");
26MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>");
27MODULE_DESCRIPTION("CAIF HSI driver");
28
/* Returns the number of padding bytes for alignment. */
#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
				(((pow)-((x)&((pow)-1)))))
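
/*
 * Worked example (pow must be a power of two): PAD_POW2(5, 4) == 3 and
 * PAD_POW2(8, 4) == 0, i.e. the number of bytes needed to round x up to
 * the next multiple of pow.
 */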

static int inactivity_timeout = 1000;
module_param(inactivity_timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(inactivity_timeout, "Inactivity timeout on HSI, ms.");
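
/*
 * Usage sketch (module name assumed to be caif_hsi): the timeout can be
 * given at load time, e.g. "modprobe caif_hsi inactivity_timeout=500",
 * or changed at runtime through
 * /sys/module/caif_hsi/parameters/inactivity_timeout (S_IWUSR).
 */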

/*
 * HSI padding options.
 * Warning: must be a power of 2 (& operation used) and cannot be zero!
 */
static int hsi_head_align = 4;
module_param(hsi_head_align, int, S_IRUGO);
MODULE_PARM_DESC(hsi_head_align, "HSI head alignment.");

static int hsi_tail_align = 4;
module_param(hsi_tail_align, int, S_IRUGO);
MODULE_PARM_DESC(hsi_tail_align, "HSI tail alignment.");

/*
 * HSI link layer flow-control thresholds.
 * Warning: A high threshold value might increase throughput but it will at
 * the same time prevent channel prioritization and increase the risk of
 * flooding the modem. The high threshold should be above the low.
 */
static int hsi_high_threshold = 100;
module_param(hsi_high_threshold, int, S_IRUGO);
MODULE_PARM_DESC(hsi_high_threshold, "HSI high threshold (FLOW OFF).");

static int hsi_low_threshold = 50;
module_param(hsi_low_threshold, int, S_IRUGO);
MODULE_PARM_DESC(hsi_low_threshold, "HSI low threshold (FLOW ON).");

#define ON 1
#define OFF 0

/*
 * Threshold values for the HSI packet queue. Flow control will be asserted
 * when the number of packets exceeds HIGH_WATER_MARK. It will not be
 * de-asserted before the number of packets drops below LOW_WATER_MARK.
 */
#define LOW_WATER_MARK   hsi_low_threshold
#define HIGH_WATER_MARK  hsi_high_threshold

static LIST_HEAD(cfhsi_list);
static spinlock_t cfhsi_list_lock;

static void cfhsi_inactivity_tout(unsigned long arg)
{
	struct cfhsi *cfhsi = (struct cfhsi *)arg;

	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	/* Schedule power down work queue. */
	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		queue_work(cfhsi->wq, &cfhsi->wake_down_work);
}

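/*
 * Note on locking: the dequeue loop below deliberately breaks out with
 * cfhsi->lock still held when the queue is empty, so that the TX state
 * and inactivity timer update at the end run under the same lock. The
 * matching spin_unlock_bh() is the last statement of the function.
 */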
static void cfhsi_abort_tx(struct cfhsi *cfhsi)
{
	struct sk_buff *skb;

	for (;;) {
		spin_lock_bh(&cfhsi->lock);
		skb = skb_dequeue(&cfhsi->qhead);
		if (!skb)
			break;

		cfhsi->ndev->stats.tx_errors++;
		cfhsi->ndev->stats.tx_dropped++;
		spin_unlock_bh(&cfhsi->lock);
		kfree_skb(skb);
	}
	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		mod_timer(&cfhsi->timer,
			  jiffies + cfhsi->inactivity_timeout);
	spin_unlock_bh(&cfhsi->lock);
}

static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
{
	char buffer[32]; /* Any reasonable value */
	size_t fifo_occupancy;
	int ret;

	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	ret = cfhsi->dev->cfhsi_wake_up(cfhsi->dev);
	if (ret) {
		dev_warn(&cfhsi->ndev->dev,
			"%s: can't wake up HSI interface: %d.\n",
			__func__, ret);
		return ret;
	}

	do {
		ret = cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
				&fifo_occupancy);
		if (ret) {
			dev_warn(&cfhsi->ndev->dev,
				"%s: can't get FIFO occupancy: %d.\n",
				__func__, ret);
			break;
		} else if (!fifo_occupancy)
			/* No more data, exiting normally */
			break;

		fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
		set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
		ret = cfhsi->dev->cfhsi_rx(buffer, fifo_occupancy,
				cfhsi->dev);
		if (ret) {
			clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
			dev_warn(&cfhsi->ndev->dev,
				"%s: can't read data: %d.\n",
				__func__, ret);
			break;
		}

		ret = 5 * HZ;
		ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
			 !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);

		if (ret < 0) {
			dev_warn(&cfhsi->ndev->dev,
				"%s: can't wait for flush complete: %d.\n",
				__func__, ret);
			break;
		} else if (!ret) {
			ret = -ETIMEDOUT;
			dev_warn(&cfhsi->ndev->dev,
				"%s: timeout waiting for flush complete.\n",
				__func__);
			break;
		}
	} while (1);

	cfhsi->dev->cfhsi_wake_down(cfhsi->dev);

	return ret;
}

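/*
 * An HSI frame starts with a descriptor (header, offset, the cffrm_len[]
 * array and an embedded-frame area of CFHSI_MAX_EMB_FRM_SZ bytes),
 * followed by up to CFHSI_MAX_PKTS payload CAIF frames. Each CAIF frame
 * is preceded by one byte holding (head padding - 1) and padded at the
 * tail to hsi_tail_align. cfhsi_tx_frm() builds such a frame into *desc
 * and returns its total length in bytes, or 0 if the TX queue was empty.
 */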
static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int nfrms = 0;
	int pld_len = 0;
	struct sk_buff *skb;
	u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;

	skb = skb_dequeue(&cfhsi->qhead);
	if (!skb)
		return 0;

	/* Clear offset. */
	desc->offset = 0;

	/* Check if we can embed a CAIF frame. */
	if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
		struct caif_payload_info *info;
		int hpad = 0;
		int tpad = 0;

		/* Calculate needed head alignment and tail alignment. */
		info = (struct caif_payload_info *)&skb->cb;

		hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
		tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
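
		/*
		 * Example with the default 4-byte alignment: for
		 * hdr_len == 2, hpad = 1 + PAD_POW2(3, 4) = 2 (one byte
		 * for the padding marker itself plus alignment), and the
		 * tail is padded so that skb->len + hpad lands on a
		 * 4-byte boundary.
		 */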

		/* Check if frame still fits with added alignment. */
		if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
			u8 *pemb = desc->emb_frm;
			desc->offset = CFHSI_DESC_SHORT_SZ;
			*pemb = (u8)(hpad - 1);
			pemb += hpad;

			/* Update network statistics. */
			cfhsi->ndev->stats.tx_packets++;
			cfhsi->ndev->stats.tx_bytes += skb->len;

			/* Copy in embedded CAIF frame. */
			skb_copy_bits(skb, 0, pemb, skb->len);
			consume_skb(skb);
			skb = NULL;
		}
	}

	/* Create payload CAIF frames. */
	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
	while (nfrms < CFHSI_MAX_PKTS) {
		struct caif_payload_info *info;
		int hpad = 0;
		int tpad = 0;

		if (!skb)
			skb = skb_dequeue(&cfhsi->qhead);

		if (!skb)
			break;

		/* Calculate needed head alignment and tail alignment. */
		info = (struct caif_payload_info *)&skb->cb;

		hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
		tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);

		/* Fill in CAIF frame length in descriptor. */
		desc->cffrm_len[nfrms] = hpad + skb->len + tpad;

		/* Fill head padding information. */
		*pfrm = (u8)(hpad - 1);
		pfrm += hpad;

		/* Update network statistics. */
		cfhsi->ndev->stats.tx_packets++;
		cfhsi->ndev->stats.tx_bytes += skb->len;

		/* Copy in CAIF frame. */
		skb_copy_bits(skb, 0, pfrm, skb->len);

		/* Update payload length. */
		pld_len += desc->cffrm_len[nfrms];

		/* Update frame pointer. */
		pfrm += skb->len + tpad;
		consume_skb(skb);
		skb = NULL;

		/* Update number of frames. */
		nfrms++;
	}

	/* Unused length fields should be zero-filled (according to SPEC). */
	while (nfrms < CFHSI_MAX_PKTS) {
		desc->cffrm_len[nfrms] = 0x0000;
		nfrms++;
	}

	/* Check if we can piggy-back another descriptor. */
	skb = skb_peek(&cfhsi->qhead);
	if (skb)
		desc->header |= CFHSI_PIGGY_DESC;
	else
		desc->header &= ~CFHSI_PIGGY_DESC;

	return CFHSI_DESC_SZ + pld_len;
}

static void cfhsi_tx_done(struct cfhsi *cfhsi)
{
	struct cfhsi_desc *desc = NULL;
	int len = 0;
	int res;

	dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	desc = (struct cfhsi_desc *)cfhsi->tx_buf;

	do {
		/*
		 * Send flow on if flow off has been previously signalled
		 * and number of packets is below low water mark.
		 */
		spin_lock_bh(&cfhsi->lock);
		if (cfhsi->flow_off_sent &&
			cfhsi->qhead.qlen <= cfhsi->q_low_mark &&
			cfhsi->cfdev.flowctrl) {

			cfhsi->flow_off_sent = 0;
			cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
		}
		spin_unlock_bh(&cfhsi->lock);

		/* Create HSI frame. */
		do {
			len = cfhsi_tx_frm(desc, cfhsi);
			if (!len) {
				spin_lock_bh(&cfhsi->lock);
				if (unlikely(skb_peek(&cfhsi->qhead))) {
					spin_unlock_bh(&cfhsi->lock);
					continue;
				}
				cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
				/* Start inactivity timer. */
				mod_timer(&cfhsi->timer,
					jiffies + cfhsi->inactivity_timeout);
				spin_unlock_bh(&cfhsi->lock);
				goto done;
			}
		} while (!len);

		/* Set up new transfer. */
		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
				__func__, res);
		}
	} while (res < 0);

done:
	return;
}

static void cfhsi_tx_done_cb(struct cfhsi_drv *drv)
{
	struct cfhsi *cfhsi;

	cfhsi = container_of(drv, struct cfhsi, drv);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;
	cfhsi_tx_done(cfhsi);
}

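/*
 * Parse the descriptor at the start of an RX buffer: deliver any embedded
 * CAIF frame to the network stack and compute, from the cffrm_len[] array,
 * how many payload bytes must be read next. Returns that transfer size,
 * -EPROTO on a malformed descriptor, or -ENOMEM if an skb could not be
 * allocated.
 */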
static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int xfer_sz = 0;
	int nfrms = 0;
	u16 *plen = NULL;
	u8 *pfrm = NULL;

	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
		dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
			__func__);
		return -EPROTO;
	}

	/* Check for embedded CAIF frame. */
	if (desc->offset) {
		struct sk_buff *skb;
		u8 *dst = NULL;
		int len = 0;
		pfrm = ((u8 *)desc) + desc->offset;

		/* Remove offset padding. */
		pfrm += *pfrm + 1;

		/* Read length of CAIF frame (little endian). */
		len = *pfrm;
		len |= ((*(pfrm+1)) << 8) & 0xFF00;
		len += 2;	/* Add FCS fields. */
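
		/*
		 * Example: the two length bytes 0x34 0x12 decode to
		 * 0x1234 (little endian); the frame check sequence then
		 * adds another two bytes on top of that.
		 */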

		/* Sanity check length of CAIF frame. */
		if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
			dev_err(&cfhsi->ndev->dev, "%s: Invalid length.\n",
				__func__);
			return -EPROTO;
		}

		/* Allocate SKB (OK even in IRQ context). */
		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			dev_err(&cfhsi->ndev->dev, "%s: Out of memory!\n",
				__func__);
			return -ENOMEM;
		}
		caif_assert(skb != NULL);

		dst = skb_put(skb, len);
		memcpy(dst, pfrm, len);

		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfhsi->ndev;

		/*
		 * We are called from an arch-specific platform device.
		 * Unfortunately we don't know what context we're
		 * running in.
		 */
		if (in_interrupt())
			netif_rx(skb);
		else
			netif_rx_ni(skb);

		/* Update network statistics. */
		cfhsi->ndev->stats.rx_packets++;
		cfhsi->ndev->stats.rx_bytes += len;
	}

	/* Calculate transfer length. */
	plen = desc->cffrm_len;
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		xfer_sz += *plen;
		plen++;
		nfrms++;
	}

	/* Check for piggy-backed descriptor. */
	if (desc->header & CFHSI_PIGGY_DESC)
		xfer_sz += CFHSI_DESC_SZ;

	if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) {
		dev_err(&cfhsi->ndev->dev,
				"%s: Invalid payload len: %d, ignored.\n",
			__func__, xfer_sz);
		return -EPROTO;
	}
	return xfer_sz;
}

static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int rx_sz = 0;
	int nfrms = 0;
	u16 *plen = NULL;
	u8 *pfrm = NULL;

	/* Sanity check header and offset. */
	if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
		dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
			__func__);
		return -EPROTO;
	}

	/* Set frame pointer to start of payload. */
	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
	plen = desc->cffrm_len;

	/* Skip already processed frames. */
	while (nfrms < cfhsi->rx_state.nfrms) {
		pfrm += *plen;
		rx_sz += *plen;
		plen++;
		nfrms++;
	}

	/* Parse payload. */
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		struct sk_buff *skb;
		u8 *dst = NULL;
		u8 *pcffrm = NULL;
		int len = 0;

		/* CAIF frame starts after head padding. */
		pcffrm = pfrm + *pfrm + 1;

		/* Read length of CAIF frame (little endian). */
		len = *pcffrm;
		len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
		len += 2;	/* Add FCS fields. */

		/* Sanity check length of CAIF frames. */
		if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
			dev_err(&cfhsi->ndev->dev, "%s: Invalid length.\n",
				__func__);
			return -EPROTO;
		}

		/* Allocate SKB (OK even in IRQ context). */
		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			dev_err(&cfhsi->ndev->dev, "%s: Out of memory!\n",
				__func__);
			cfhsi->rx_state.nfrms = nfrms;
			return -ENOMEM;
		}
		caif_assert(skb != NULL);

		dst = skb_put(skb, len);
		memcpy(dst, pcffrm, len);

		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfhsi->ndev;

		/*
		 * We're called from a platform device,
		 * and don't know the context we're running in.
		 */
		if (in_interrupt())
			netif_rx(skb);
		else
			netif_rx_ni(skb);

		/* Update network statistics. */
		cfhsi->ndev->stats.rx_packets++;
		cfhsi->ndev->stats.rx_bytes += len;

		pfrm += *plen;
		rx_sz += *plen;
		plen++;
		nfrms++;
	}

	return rx_sz;
}

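/*
 * RX runs as a two-state machine: in CFHSI_RX_STATE_DESC we have just read
 * a descriptor and parse it to learn the payload size; in
 * CFHSI_RX_STATE_PAYLOAD we have read the payload (possibly followed by a
 * piggy-backed descriptor for the next frame) and deliver its CAIF frames.
 * On -ENOMEM the slow-path timer retries the parse; on -EPROTO the link is
 * declared out of sync and the interface is closed.
 */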
static void cfhsi_rx_done(struct cfhsi *cfhsi)
{
	int res;
	int desc_pld_len = 0;
	struct cfhsi_desc *desc = NULL;

	desc = (struct cfhsi_desc *)cfhsi->rx_buf;

	dev_dbg(&cfhsi->ndev->dev, "%s\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Update inactivity timer if pending. */
	spin_lock_bh(&cfhsi->lock);
	mod_timer_pending(&cfhsi->timer,
			jiffies + cfhsi->inactivity_timeout);
	spin_unlock_bh(&cfhsi->lock);

	if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
		desc_pld_len = cfhsi_rx_desc(desc, cfhsi);
		if (desc_pld_len == -ENOMEM)
			goto restart;
		if (desc_pld_len == -EPROTO)
			goto out_of_sync;
	} else {
		int pld_len;

		if (!cfhsi->rx_state.piggy_desc) {
			pld_len = cfhsi_rx_pld(desc, cfhsi);
			if (pld_len == -ENOMEM)
				goto restart;
			if (pld_len == -EPROTO)
				goto out_of_sync;
			cfhsi->rx_state.pld_len = pld_len;
		} else {
			pld_len = cfhsi->rx_state.pld_len;
		}

		if ((pld_len > 0) && (desc->header & CFHSI_PIGGY_DESC)) {
			struct cfhsi_desc *piggy_desc;
			piggy_desc = (struct cfhsi_desc *)
				(desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
						pld_len);
			cfhsi->rx_state.piggy_desc = true;

			/* Extract piggy-backed descriptor. */
			desc_pld_len = cfhsi_rx_desc(piggy_desc, cfhsi);
			if (desc_pld_len == -ENOMEM)
				goto restart;

			/*
			 * Copy needed information from the piggy-backed
			 * descriptor to the descriptor at the start.
			 */
			memcpy((u8 *)desc, (u8 *)piggy_desc,
					CFHSI_DESC_SHORT_SZ);

			if (desc_pld_len == -EPROTO)
				goto out_of_sync;
		}
	}

	memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state));
	if (desc_pld_len) {
		cfhsi->rx_state.state = CFHSI_RX_STATE_PAYLOAD;
		cfhsi->rx_ptr = cfhsi->rx_buf + CFHSI_DESC_SZ;
		cfhsi->rx_len = desc_pld_len;
	} else {
		cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;
		cfhsi->rx_ptr = cfhsi->rx_buf;
		cfhsi->rx_len = CFHSI_DESC_SZ;
	}

	if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
		/* Set up new transfer. */
		dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n",
			__func__);
		res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len,
				cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: RX error %d.\n",
				__func__, res);
			cfhsi->ndev->stats.rx_errors++;
			cfhsi->ndev->stats.rx_dropped++;
		}
	}
	return;

restart:
	if (++cfhsi->rx_state.retries > CFHSI_MAX_RX_RETRIES) {
		dev_err(&cfhsi->ndev->dev, "%s: No memory available "
			"in %d iterations.\n",
			__func__, CFHSI_MAX_RX_RETRIES);
		BUG();
	}
	mod_timer(&cfhsi->rx_slowpath_timer, jiffies + 1);
	return;

out_of_sync:
	dev_err(&cfhsi->ndev->dev, "%s: Out of sync.\n", __func__);
	print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE,
			cfhsi->rx_buf, CFHSI_DESC_SZ);
	schedule_work(&cfhsi->out_of_sync_work);
}

static void cfhsi_rx_slowpath(unsigned long arg)
{
	struct cfhsi *cfhsi = (struct cfhsi *)arg;

	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	cfhsi_rx_done(cfhsi);
}

static void cfhsi_rx_done_cb(struct cfhsi_drv *drv)
{
	struct cfhsi *cfhsi;

	cfhsi = container_of(drv, struct cfhsi, drv);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits))
		wake_up_interruptible(&cfhsi->flush_fifo_wait);
	else
		cfhsi_rx_done(cfhsi);
}

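/*
 * Wake-up handshake: we assert the wake line, then wait (up to
 * CFHSI_WAKE_TOUT) for the peer to acknowledge via cfhsi_wake_up_cb().
 * On timeout we still check the FIFO and the peer wake line, since the
 * acknowledge interrupt may simply have been missed.
 */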
static void cfhsi_wake_up(struct work_struct *work)
{
	struct cfhsi *cfhsi = NULL;
	int res;
	int len;
	long ret;

	cfhsi = container_of(work, struct cfhsi, wake_up_work);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) {
		/* This happens when wakeup is requested by
		 * both ends at the same time. */
		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
		return;
	}

	/* Activate wake line. */
	cfhsi->dev->cfhsi_wake_up(cfhsi->dev);

	dev_dbg(&cfhsi->ndev->dev, "%s: Start waiting.\n",
		__func__);

	/* Wait for acknowledge. */
	ret = CFHSI_WAKE_TOUT;
	ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait,
					test_and_clear_bit(CFHSI_WAKE_UP_ACK,
							&cfhsi->bits), ret);
	if (unlikely(ret < 0)) {
		/* Interrupted by signal. */
		dev_err(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
			__func__, ret);

		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
		return;
	} else if (!ret) {
		bool ca_wake = false;
		size_t fifo_occupancy = 0;

		/* Wakeup timeout */
		dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n",
			__func__);

		/* Check FIFO to see if the modem has sent something. */
		WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
					&fifo_occupancy));

		dev_err(&cfhsi->ndev->dev, "%s: Bytes in FIFO: %u.\n",
				__func__, (unsigned) fifo_occupancy);

		/* Check if we missed the interrupt. */
		WARN_ON(cfhsi->dev->cfhsi_get_peer_wake(cfhsi->dev,
							&ca_wake));

		if (ca_wake) {
			dev_err(&cfhsi->ndev->dev, "%s: CA Wake missed!\n",
				__func__);

			/* Clear the CFHSI_WAKE_UP_ACK bit to prevent race. */
			clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);

			/* Continue execution. */
			goto wake_ack;
		}

		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
		return;
	}
wake_ack:
	dev_dbg(&cfhsi->ndev->dev, "%s: Woken.\n",
		__func__);

	/* Note that we are awake and clear the pending wake-up request. */
	set_bit(CFHSI_AWAKE, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);

	/* Resume read operation. */
	dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n", __func__);
	res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->dev);

	if (WARN_ON(res < 0))
		dev_err(&cfhsi->ndev->dev, "%s: RX err %d.\n", __func__, res);

	/* Clear power up acknowledgment. */
	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);

	spin_lock_bh(&cfhsi->lock);

	/* Resume transmit if queue is not empty. */
	if (!skb_peek(&cfhsi->qhead)) {
		dev_dbg(&cfhsi->ndev->dev, "%s: Peer wake, start timer.\n",
			__func__);
		/* Start inactivity timer. */
		mod_timer(&cfhsi->timer,
				jiffies + cfhsi->inactivity_timeout);
		spin_unlock_bh(&cfhsi->lock);
		return;
	}

	dev_dbg(&cfhsi->ndev->dev, "%s: Host wake.\n",
		__func__);

	spin_unlock_bh(&cfhsi->lock);

	/* Create HSI frame. */
	len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi);

	if (likely(len > 0)) {
		/* Set up new transfer. */
		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
				__func__, res);
			cfhsi_abort_tx(cfhsi);
		}
	} else {
		dev_err(&cfhsi->ndev->dev,
			"%s: Failed to create HSI frame: %d.\n",
			__func__, len);
	}
}

static void cfhsi_wake_down(struct work_struct *work)
{
	long ret;
	struct cfhsi *cfhsi = NULL;
	size_t fifo_occupancy = 0;
	int retry = CFHSI_WAKE_TOUT;

	cfhsi = container_of(work, struct cfhsi, wake_down_work);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Deactivate wake line. */
	cfhsi->dev->cfhsi_wake_down(cfhsi->dev);

	/* Wait for acknowledge. */
	ret = CFHSI_WAKE_TOUT;
	ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait,
					test_and_clear_bit(CFHSI_WAKE_DOWN_ACK,
							&cfhsi->bits), ret);
	if (ret < 0) {
		/* Interrupted by signal. */
		dev_err(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
			__func__, ret);
		return;
	} else if (!ret) {
		bool ca_wake = true;

		/* Timeout */
		dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n", __func__);

		/* Check if we missed the interrupt. */
		WARN_ON(cfhsi->dev->cfhsi_get_peer_wake(cfhsi->dev,
							&ca_wake));
		if (!ca_wake)
			dev_err(&cfhsi->ndev->dev, "%s: CA Wake missed!\n",
				__func__);
	}

	/* Check FIFO occupancy. */
	while (retry) {
		WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
							&fifo_occupancy));

		if (!fifo_occupancy)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(1);
		retry--;
	}

	if (!retry)
		dev_err(&cfhsi->ndev->dev, "%s: FIFO Timeout.\n", __func__);

	/* Clear AWAKE condition. */
	clear_bit(CFHSI_AWAKE, &cfhsi->bits);

	/* Cancel pending RX requests. */
	cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
}

static void cfhsi_out_of_sync(struct work_struct *work)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(work, struct cfhsi, out_of_sync_work);

	rtnl_lock();
	dev_close(cfhsi->ndev);
	rtnl_unlock();
}

static void cfhsi_wake_up_cb(struct cfhsi_drv *drv)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(drv, struct cfhsi, drv);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
	wake_up_interruptible(&cfhsi->wake_up_wait);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Schedule wake up work queue if the peer initiates. */
	if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
		queue_work(cfhsi->wq, &cfhsi->wake_up_work);
}

static void cfhsi_wake_down_cb(struct cfhsi_drv *drv)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(drv, struct cfhsi, drv);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	/* Initiating low power is only permitted by the host (us). */
	set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
	wake_up_interruptible(&cfhsi->wake_down_wait);
}

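/*
 * ndo_start_xmit: queue the skb, apply flow control against the water
 * marks, and then either piggy-back on an ongoing transfer, start a new
 * one directly (link already awake, i.e. the inactivity timer was
 * running), or schedule the wake-up worker to bring the link up first.
 */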
static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct cfhsi *cfhsi = NULL;
	int start_xfer = 0;
	int timer_active;

	if (!dev)
		return -EINVAL;

	cfhsi = netdev_priv(dev);

	spin_lock_bh(&cfhsi->lock);

	skb_queue_tail(&cfhsi->qhead, skb);

	/* Sanity check; xmit should not be called after unregister_netdev */
	if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
		spin_unlock_bh(&cfhsi->lock);
		cfhsi_abort_tx(cfhsi);
		return -EINVAL;
	}

	/* Send flow off if number of packets is above high water mark. */
	if (!cfhsi->flow_off_sent &&
		cfhsi->qhead.qlen > cfhsi->q_high_mark &&
		cfhsi->cfdev.flowctrl) {
		cfhsi->flow_off_sent = 1;
		cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
	}

	if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) {
		cfhsi->tx_state = CFHSI_TX_STATE_XFER;
		start_xfer = 1;
	}

	if (!start_xfer) {
		spin_unlock_bh(&cfhsi->lock);
		return 0;
	}

	/* Delete inactivity timer if started. */
	timer_active = del_timer_sync(&cfhsi->timer);

	spin_unlock_bh(&cfhsi->lock);

	if (timer_active) {
		struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
		int len;
		int res;

		/* Create HSI frame. */
		len = cfhsi_tx_frm(desc, cfhsi);
		BUG_ON(!len);

		/* Set up new transfer. */
		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
				__func__, res);
			cfhsi_abort_tx(cfhsi);
		}
	} else {
		/* Schedule wake up work queue if we initiate. */
		if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
			queue_work(cfhsi->wq, &cfhsi->wake_up_work);
	}

	return 0;
}

static int cfhsi_open(struct net_device *dev)
{
	netif_wake_queue(dev);

	return 0;
}

static int cfhsi_close(struct net_device *dev)
{
	netif_stop_queue(dev);

	return 0;
}

static const struct net_device_ops cfhsi_ops = {
	.ndo_open = cfhsi_open,
	.ndo_stop = cfhsi_close,
	.ndo_start_xmit = cfhsi_xmit
};

static void cfhsi_setup(struct net_device *dev)
{
	struct cfhsi *cfhsi = netdev_priv(dev);
	dev->features = 0;
	dev->netdev_ops = &cfhsi_ops;
	dev->type = ARPHRD_CAIF;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
	dev->mtu = CFHSI_MAX_PAYLOAD_SZ;
	dev->tx_queue_len = 0;
	dev->destructor = free_netdev;
	skb_queue_head_init(&cfhsi->qhead);
	cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
	cfhsi->cfdev.use_frag = false;
	cfhsi->cfdev.use_stx = false;
	cfhsi->cfdev.use_fcs = false;
	cfhsi->ndev = dev;
}

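/*
 * A minimal sketch of the board/glue code this probe expects: the
 * platform device must be named "cfhsi" and carry a struct cfhsi_dev
 * (with its transfer and wake callbacks filled in) as platform_data.
 * The my_* names below are hypothetical placeholders.
 *
 *	static struct cfhsi_dev my_hsi_dev = {
 *		.cfhsi_up	 = my_hsi_up,
 *		.cfhsi_down	 = my_hsi_down,
 *		.cfhsi_tx	 = my_hsi_tx,
 *		.cfhsi_rx	 = my_hsi_rx,
 *		.cfhsi_wake_up	 = my_hsi_wake_up,
 *		.cfhsi_wake_down = my_hsi_wake_down,
 *	};
 *
 *	static struct platform_device my_cfhsi_pdev = {
 *		.name			= "cfhsi",
 *		.id			= 0,
 *		.dev.platform_data	= &my_hsi_dev,
 *	};
 *
 *	platform_device_register(&my_cfhsi_pdev);
 */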
int cfhsi_probe(struct platform_device *pdev)
{
	struct cfhsi *cfhsi = NULL;
	struct net_device *ndev;
	struct cfhsi_dev *dev;
	int res;

	ndev = alloc_netdev(sizeof(struct cfhsi), "cfhsi%d", cfhsi_setup);
	if (!ndev)
		return -ENODEV;

	cfhsi = netdev_priv(ndev);
	cfhsi->ndev = ndev;
	cfhsi->pdev = pdev;

	/* Initialize state variables. */
	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
	cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;

	/* Set flow info */
	cfhsi->flow_off_sent = 0;
	cfhsi->q_low_mark = LOW_WATER_MARK;
	cfhsi->q_high_mark = HIGH_WATER_MARK;

	/* Assign the HSI device. */
	dev = (struct cfhsi_dev *)pdev->dev.platform_data;
	cfhsi->dev = dev;

	/* Assign the driver to this HSI device. */
	dev->drv = &cfhsi->drv;

	/*
	 * Allocate a TX buffer with the size of a HSI packet descriptor
	 * and the necessary room for CAIF payload frames.
	 */
	cfhsi->tx_buf = kzalloc(CFHSI_BUF_SZ_TX, GFP_KERNEL);
	if (!cfhsi->tx_buf) {
		res = -ENODEV;
		goto err_alloc_tx;
	}

	/*
	 * Allocate a RX buffer with the size of two HSI packet descriptors and
	 * the necessary room for CAIF payload frames.
	 */
	cfhsi->rx_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
	if (!cfhsi->rx_buf) {
		res = -ENODEV;
		goto err_alloc_rx;
	}

	/* Pre-calculate inactivity timeout. */
	if (inactivity_timeout != -1) {
		cfhsi->inactivity_timeout =
				inactivity_timeout * HZ / 1000;
		if (!cfhsi->inactivity_timeout)
			cfhsi->inactivity_timeout = 1;
		else if (cfhsi->inactivity_timeout > NEXT_TIMER_MAX_DELTA)
			cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
	} else {
		cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
	}
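
	/*
	 * Worked example: with HZ == 1000 the default 1000 ms becomes 1000
	 * jiffies, with HZ == 100 it becomes 100. A result of 0 is rounded
	 * up to 1 jiffy, and inactivity_timeout == -1 selects
	 * NEXT_TIMER_MAX_DELTA, i.e. the timeout is effectively disabled.
	 */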

	/* Initialize receive variables. */
	cfhsi->rx_ptr = cfhsi->rx_buf;
	cfhsi->rx_len = CFHSI_DESC_SZ;

	/* Initialize spin locks. */
	spin_lock_init(&cfhsi->lock);

	/* Set up the driver. */
	cfhsi->drv.tx_done_cb = cfhsi_tx_done_cb;
	cfhsi->drv.rx_done_cb = cfhsi_rx_done_cb;
	cfhsi->drv.wake_up_cb = cfhsi_wake_up_cb;
	cfhsi->drv.wake_down_cb = cfhsi_wake_down_cb;

	/* Initialize the work queues. */
	INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
	INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down);
	INIT_WORK(&cfhsi->out_of_sync_work, cfhsi_out_of_sync);

	/* Clear all bit fields. */
	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
	clear_bit(CFHSI_AWAKE, &cfhsi->bits);

	/* Create work thread. */
	cfhsi->wq = create_singlethread_workqueue(pdev->name);
	if (!cfhsi->wq) {
		dev_err(&ndev->dev, "%s: Failed to create work queue.\n",
			__func__);
		res = -ENODEV;
		goto err_create_wq;
	}

	/* Initialize wait queues. */
	init_waitqueue_head(&cfhsi->wake_up_wait);
	init_waitqueue_head(&cfhsi->wake_down_wait);
	init_waitqueue_head(&cfhsi->flush_fifo_wait);

	/* Setup the inactivity timer. */
	init_timer(&cfhsi->timer);
	cfhsi->timer.data = (unsigned long)cfhsi;
	cfhsi->timer.function = cfhsi_inactivity_tout;
	/* Setup the slowpath RX timer. */
	init_timer(&cfhsi->rx_slowpath_timer);
	cfhsi->rx_slowpath_timer.data = (unsigned long)cfhsi;
	cfhsi->rx_slowpath_timer.function = cfhsi_rx_slowpath;

	/* Add CAIF HSI device to list. */
	spin_lock(&cfhsi_list_lock);
	list_add_tail(&cfhsi->list, &cfhsi_list);
	spin_unlock(&cfhsi_list_lock);

	/* Activate HSI interface. */
	res = cfhsi->dev->cfhsi_up(cfhsi->dev);
	if (res) {
		dev_err(&cfhsi->ndev->dev,
			"%s: can't activate HSI interface: %d.\n",
			__func__, res);
		goto err_activate;
	}

	/* Flush FIFO */
	res = cfhsi_flush_fifo(cfhsi);
	if (res) {
		dev_err(&ndev->dev, "%s: Can't flush FIFO: %d.\n",
			__func__, res);
		goto err_net_reg;
	}

	/* Register network device. */
	res = register_netdev(ndev);
	if (res) {
		dev_err(&ndev->dev, "%s: Registration error: %d.\n",
			__func__, res);
		goto err_net_reg;
	}

	netif_stop_queue(ndev);

	return res;

 err_net_reg:
	cfhsi->dev->cfhsi_down(cfhsi->dev);
 err_activate:
	destroy_workqueue(cfhsi->wq);
 err_create_wq:
	kfree(cfhsi->rx_buf);
 err_alloc_rx:
	kfree(cfhsi->tx_buf);
 err_alloc_tx:
	free_netdev(ndev);

	return res;
}

static void cfhsi_shutdown(struct cfhsi *cfhsi)
{
	u8 *tx_buf, *rx_buf;

	/* Stop TXing */
	netif_tx_stop_all_queues(cfhsi->ndev);

	/* going to shutdown driver */
	set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);

	/* Flush workqueue */
	flush_workqueue(cfhsi->wq);

	/* Delete timers if pending */
	del_timer_sync(&cfhsi->timer);
	del_timer_sync(&cfhsi->rx_slowpath_timer);

	/* Cancel pending RX request (if any) */
	cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);

	/* Destroy workqueue */
	destroy_workqueue(cfhsi->wq);

	/* Store buffers: will be freed later. */
	tx_buf = cfhsi->tx_buf;
	rx_buf = cfhsi->rx_buf;

	/* Flush transmit queues. */
	cfhsi_abort_tx(cfhsi);

	/* Deactivate interface */
	cfhsi->dev->cfhsi_down(cfhsi->dev);

	/* Finally unregister the network device. */
	unregister_netdev(cfhsi->ndev);

	/* Free buffers. */
	kfree(tx_buf);
	kfree(rx_buf);
}

int cfhsi_remove(struct platform_device *pdev)
{
	struct list_head *list_node;
	struct list_head *n;
	struct cfhsi *cfhsi = NULL;
	struct cfhsi_dev *dev;

	dev = (struct cfhsi_dev *)pdev->dev.platform_data;
	spin_lock(&cfhsi_list_lock);
	list_for_each_safe(list_node, n, &cfhsi_list) {
		cfhsi = list_entry(list_node, struct cfhsi, list);
		/* Find the corresponding device. */
		if (cfhsi->dev == dev) {
			/* Remove from list. */
			list_del(list_node);
			spin_unlock(&cfhsi_list_lock);

			/* Shutdown driver. */
			cfhsi_shutdown(cfhsi);

			return 0;
		}
	}
	spin_unlock(&cfhsi_list_lock);
	return -ENODEV;
}

struct platform_driver cfhsi_plat_drv = {
	.probe = cfhsi_probe,
	.remove = cfhsi_remove,
	.driver = {
		.name = "cfhsi",
		.owner = THIS_MODULE,
	},
};

static void __exit cfhsi_exit_module(void)
{
	struct list_head *list_node;
	struct list_head *n;
	struct cfhsi *cfhsi = NULL;

	spin_lock(&cfhsi_list_lock);
	list_for_each_safe(list_node, n, &cfhsi_list) {
		cfhsi = list_entry(list_node, struct cfhsi, list);

		/* Remove from list. */
		list_del(list_node);
		spin_unlock(&cfhsi_list_lock);

		/* Shutdown driver. */
		cfhsi_shutdown(cfhsi);

		spin_lock(&cfhsi_list_lock);
	}
	spin_unlock(&cfhsi_list_lock);

	/* Unregister platform driver. */
	platform_driver_unregister(&cfhsi_plat_drv);
}

static int __init cfhsi_init_module(void)
{
	int result;

	/* Initialize spin lock. */
	spin_lock_init(&cfhsi_list_lock);

	/* Register platform driver. */
	result = platform_driver_register(&cfhsi_plat_drv);
	if (result) {
		printk(KERN_ERR "Could not register platform HSI driver: %d.\n",
			result);
		goto err_dev_register;
	}

	return result;

 err_dev_register:
	return result;
}

module_init(cfhsi_init_module);
module_exit(cfhsi_exit_module);