/*
 * Copyright (C) ST-Ericsson AB 2010
 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
 * Author:  Daniel Martensson / daniel.martensson@stericsson.com
 *	    Dmitry.Tarnyagin  / dmitry.tarnyagin@stericsson.com
 * License terms: GNU General Public License (GPL) version 2.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/if_arp.h>
#include <linux/timer.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_hsi.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Martensson <daniel.martensson@stericsson.com>");
MODULE_DESCRIPTION("CAIF HSI driver");

/* Returns the number of padding bytes for alignment. */
#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
				(((pow)-((x)&((pow)-1)))))
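
/*
 * Worked example of the macro above (an illustrative comment, not driver
 * logic): with the default 4-byte alignment,
 *
 *	PAD_POW2(5, 4) == 3	(5 + 3 == 8, the next multiple of 4)
 *	PAD_POW2(8, 4) == 0	(already aligned)
 *
 * The bit-mask trick is only valid because "pow" is a power of two, which
 * is why the alignment parameters below must never be zero or any other
 * non-power-of-two value.
 */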

/*
 * HSI padding options.
 * Warning: must be a power of 2 (the & operation is used) and cannot be zero!
 */
static int hsi_head_align = 4;
module_param(hsi_head_align, int, S_IRUGO);
MODULE_PARM_DESC(hsi_head_align, "HSI head alignment.");

static int hsi_tail_align = 4;
module_param(hsi_tail_align, int, S_IRUGO);
MODULE_PARM_DESC(hsi_tail_align, "HSI tail alignment.");

/*
 * HSI link layer flow-control thresholds.
 * Warning: A high threshold value might increase throughput but it will at
 * the same time prevent channel prioritization and increase the risk of
 * flooding the modem. The high threshold should be above the low.
 */
static int hsi_high_threshold = 100;
module_param(hsi_high_threshold, int, S_IRUGO);
MODULE_PARM_DESC(hsi_high_threshold, "HSI high threshold (FLOW OFF).");

static int hsi_low_threshold = 50;
module_param(hsi_low_threshold, int, S_IRUGO);
MODULE_PARM_DESC(hsi_low_threshold, "HSI low threshold (FLOW ON).");

#define ON 1
#define OFF 0

/*
 * Threshold values for the HSI packet queue. Flow control will be asserted
 * when the number of packets exceeds HIGH_WATER_MARK. It will not be
 * de-asserted before the number of packets drops below LOW_WATER_MARK.
 */
#define LOW_WATER_MARK   hsi_low_threshold
#define HIGH_WATER_MARK  hsi_high_threshold

static LIST_HEAD(cfhsi_list);
static spinlock_t cfhsi_list_lock;

static void cfhsi_inactivity_tout(unsigned long arg)
{
	struct cfhsi *cfhsi = (struct cfhsi *)arg;

	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	/* Schedule power down work queue. */
	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		queue_work(cfhsi->wq, &cfhsi->wake_down_work);
}

static void cfhsi_abort_tx(struct cfhsi *cfhsi)
{
	struct sk_buff *skb;

	for (;;) {
		spin_lock_bh(&cfhsi->lock);
		skb = skb_dequeue(&cfhsi->qhead);
		if (!skb)
			break;

		cfhsi->ndev->stats.tx_errors++;
		cfhsi->ndev->stats.tx_dropped++;
		spin_unlock_bh(&cfhsi->lock);
		kfree_skb(skb);
	}
	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		mod_timer(&cfhsi->timer, jiffies + CFHSI_INACTIVITY_TOUT);
	spin_unlock_bh(&cfhsi->lock);
}

static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
{
	char buffer[32]; /* Any reasonable value */
	size_t fifo_occupancy;
	int ret;

	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	ret = cfhsi->dev->cfhsi_wake_up(cfhsi->dev);
	if (ret) {
		dev_warn(&cfhsi->ndev->dev,
			"%s: can't wake up HSI interface: %d.\n",
			__func__, ret);
		return ret;
	}

	do {
		ret = cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
				&fifo_occupancy);
		if (ret) {
			dev_warn(&cfhsi->ndev->dev,
				"%s: can't get FIFO occupancy: %d.\n",
				__func__, ret);
			break;
		} else if (!fifo_occupancy)
			/* No more data, exiting normally */
			break;

		fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
		set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
		ret = cfhsi->dev->cfhsi_rx(buffer, fifo_occupancy,
				cfhsi->dev);
		if (ret) {
			clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
			dev_warn(&cfhsi->ndev->dev,
				"%s: can't read data: %d.\n",
				__func__, ret);
			break;
		}

		ret = 5 * HZ;
		ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
			 !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);

		if (ret < 0) {
			dev_warn(&cfhsi->ndev->dev,
				"%s: can't wait for flush complete: %d.\n",
				__func__, ret);
			break;
		} else if (!ret) {
			ret = -ETIMEDOUT;
			dev_warn(&cfhsi->ndev->dev,
				"%s: timeout waiting for flush complete.\n",
				__func__);
			break;
		}
	} while (1);

	cfhsi->dev->cfhsi_wake_down(cfhsi->dev);

	return ret;
}

static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int nfrms = 0;
	int pld_len = 0;
	struct sk_buff *skb;
	u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;

	skb = skb_dequeue(&cfhsi->qhead);
	if (!skb)
		return 0;

	/* Clear offset. */
	desc->offset = 0;

	/* Check if we can embed a CAIF frame. */
	if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
		struct caif_payload_info *info;
		int hpad = 0;
		int tpad = 0;

		/* Calculate needed head alignment and tail alignment. */
		info = (struct caif_payload_info *)&skb->cb;

		hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
		tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);

		/* Check if frame still fits with added alignment. */
		if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
			u8 *pemb = desc->emb_frm;
			desc->offset = CFHSI_DESC_SHORT_SZ;
			*pemb = (u8)(hpad - 1);
			pemb += hpad;

			/* Update network statistics. */
			cfhsi->ndev->stats.tx_packets++;
			cfhsi->ndev->stats.tx_bytes += skb->len;

			/* Copy in embedded CAIF frame. */
			skb_copy_bits(skb, 0, pemb, skb->len);
			consume_skb(skb);
			skb = NULL;
		}
	}

	/* Create payload CAIF frames. */
	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
	while (nfrms < CFHSI_MAX_PKTS) {
		struct caif_payload_info *info;
		int hpad = 0;
		int tpad = 0;

		if (!skb)
			skb = skb_dequeue(&cfhsi->qhead);

		if (!skb)
			break;

		/* Calculate needed head alignment and tail alignment. */
		info = (struct caif_payload_info *)&skb->cb;

		hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
		tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);

		/* Fill in CAIF frame length in descriptor. */
		desc->cffrm_len[nfrms] = hpad + skb->len + tpad;

		/* Fill head padding information. */
		*pfrm = (u8)(hpad - 1);
		pfrm += hpad;

		/* Update network statistics. */
		cfhsi->ndev->stats.tx_packets++;
		cfhsi->ndev->stats.tx_bytes += skb->len;

		/* Copy in CAIF frame. */
		skb_copy_bits(skb, 0, pfrm, skb->len);

		/* Update payload length. */
		pld_len += desc->cffrm_len[nfrms];

		/* Update frame pointer. */
		pfrm += skb->len + tpad;
		consume_skb(skb);
		skb = NULL;

		/* Update number of frames. */
		nfrms++;
	}

	/* Unused length fields should be zero-filled (according to SPEC). */
	while (nfrms < CFHSI_MAX_PKTS) {
		desc->cffrm_len[nfrms] = 0x0000;
		nfrms++;
	}

	/* Check if we can piggy-back another descriptor. */
	skb = skb_peek(&cfhsi->qhead);
	if (skb)
		desc->header |= CFHSI_PIGGY_DESC;
	else
		desc->header &= ~CFHSI_PIGGY_DESC;

	return CFHSI_DESC_SZ + pld_len;
}
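
/*
 * Illustrative TX buffer layout produced by cfhsi_tx_frm() above (a sketch
 * derived from the code, not a normative spec): the descriptor comes first,
 * followed by up to CFHSI_MAX_PKTS padded payload frames. With 4-byte
 * alignment and a 2-byte CAIF header, hpad = 1 + PAD_POW2(3, 4) = 2, and
 * each payload slot of desc->cffrm_len[n] bytes looks like:
 *
 *	[1 byte: hpad - 1][hpad - 1 padding bytes][CAIF frame][tpad bytes]
 */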

static void cfhsi_tx_done(struct cfhsi *cfhsi)
{
	struct cfhsi_desc *desc = NULL;
	int len = 0;
	int res;

	dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	desc = (struct cfhsi_desc *)cfhsi->tx_buf;

	do {
		/*
		 * Send flow on if flow off has been previously signalled
		 * and number of packets is below low water mark.
		 */
		spin_lock_bh(&cfhsi->lock);
		if (cfhsi->flow_off_sent &&
			cfhsi->qhead.qlen <= cfhsi->q_low_mark &&
			cfhsi->cfdev.flowctrl) {

			cfhsi->flow_off_sent = 0;
			cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
		}
		spin_unlock_bh(&cfhsi->lock);

		/* Create HSI frame. */
		do {
			len = cfhsi_tx_frm(desc, cfhsi);
			if (!len) {
				spin_lock_bh(&cfhsi->lock);
				if (unlikely(skb_peek(&cfhsi->qhead))) {
					spin_unlock_bh(&cfhsi->lock);
					continue;
				}
				cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
				/* Start inactivity timer. */
				mod_timer(&cfhsi->timer,
					jiffies + CFHSI_INACTIVITY_TOUT);
				spin_unlock_bh(&cfhsi->lock);
				goto done;
			}
		} while (!len);

		/* Set up new transfer. */
		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
				__func__, res);
		}
	} while (res < 0);

done:
	return;
}

static void cfhsi_tx_done_cb(struct cfhsi_drv *drv)
{
	struct cfhsi *cfhsi;

	cfhsi = container_of(drv, struct cfhsi, drv);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;
	cfhsi_tx_done(cfhsi);
}

static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi,
			 bool *dump)
{
	int xfer_sz = 0;
	int nfrms = 0;
	u16 *plen = NULL;
	u8 *pfrm = NULL;

	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
		dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
			__func__);
		*dump = true;
		return 0;
	}

	/* Check for embedded CAIF frame. */
	if (desc->offset) {
		struct sk_buff *skb;
		u8 *dst = NULL;
		int len = 0;
		pfrm = ((u8 *)desc) + desc->offset;

		/* Remove offset padding. */
		pfrm += *pfrm + 1;

		/* Read length of CAIF frame (little endian). */
		len = *pfrm;
		len |= ((*(pfrm+1)) << 8) & 0xFF00;
		len += 2;	/* Add FCS fields. */

		/* Allocate SKB (OK even in IRQ context). */
		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			dev_err(&cfhsi->ndev->dev, "%s: Out of memory !\n",
				__func__);
			return -ENOMEM;
		}
		caif_assert(skb != NULL);

		dst = skb_put(skb, len);
		memcpy(dst, pfrm, len);

		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfhsi->ndev;

		/*
		 * We are called from an arch-specific platform device.
		 * Unfortunately we don't know what context we're
		 * running in.
		 */
		if (in_interrupt())
			netif_rx(skb);
		else
			netif_rx_ni(skb);

		/* Update network statistics. */
		cfhsi->ndev->stats.rx_packets++;
		cfhsi->ndev->stats.rx_bytes += len;
	}

	/* Calculate transfer length. */
	plen = desc->cffrm_len;
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		xfer_sz += *plen;
		plen++;
		nfrms++;
	}

	/* Check for piggy-backed descriptor. */
	if (desc->header & CFHSI_PIGGY_DESC)
		xfer_sz += CFHSI_DESC_SZ;

	if (xfer_sz % 4) {
		dev_err(&cfhsi->ndev->dev,
			"%s: Invalid payload len: %d, ignored.\n",
			__func__, xfer_sz);
		xfer_sz = 0;
		*dump = true;
	}
	return xfer_sz;
}
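
/*
 * Worked example of the little-endian length decode above (illustrative
 * only): if the two bytes after the head padding are 0x34 0x12, the CAIF
 * frame length is 0x34 | (0x12 << 8) = 0x1234, and two more bytes are
 * then added for the FCS fields.
 */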

static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi,
			bool *dump)
{
	int rx_sz = 0;
	int nfrms = 0;
	u16 *plen = NULL;
	u8 *pfrm = NULL;

	/* Sanity check header and offset. */
	if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
		dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
			__func__);
		*dump = true;
		return -EINVAL;
	}

	/* Set frame pointer to start of payload. */
	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
	plen = desc->cffrm_len;

	/* Skip already processed frames. */
	while (nfrms < cfhsi->rx_state.nfrms) {
		pfrm += *plen;
		rx_sz += *plen;
		plen++;
		nfrms++;
	}

	/* Parse payload. */
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		struct sk_buff *skb;
		u8 *dst = NULL;
		u8 *pcffrm = NULL;
		int len = 0;

		if (WARN_ON(desc->cffrm_len[nfrms] > CFHSI_MAX_PAYLOAD_SZ)) {
			dev_err(&cfhsi->ndev->dev, "%s: Invalid payload.\n",
				__func__);
			*dump = true;
			return -EINVAL;
		}

		/* CAIF frame starts after head padding. */
		pcffrm = pfrm + *pfrm + 1;

		/* Read length of CAIF frame (little endian). */
		len = *pcffrm;
		len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
		len += 2;	/* Add FCS fields. */

		/* Allocate SKB (OK even in IRQ context). */
		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			dev_err(&cfhsi->ndev->dev, "%s: Out of memory !\n",
				__func__);
			cfhsi->rx_state.nfrms = nfrms;
			return -ENOMEM;
		}
		caif_assert(skb != NULL);

		dst = skb_put(skb, len);
		memcpy(dst, pcffrm, len);

		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfhsi->ndev;

		/*
		 * We're called from a platform device,
		 * and don't know the context we're running in.
		 */
		if (in_interrupt())
			netif_rx(skb);
		else
			netif_rx_ni(skb);

		/* Update network statistics. */
		cfhsi->ndev->stats.rx_packets++;
		cfhsi->ndev->stats.rx_bytes += len;

		pfrm += *plen;
		rx_sz += *plen;
		plen++;
		nfrms++;
	}

	return rx_sz;
}

static void cfhsi_rx_done(struct cfhsi *cfhsi)
{
	int res;
	int desc_pld_len = 0;
	struct cfhsi_desc *desc = NULL;
	bool dump = false;

	desc = (struct cfhsi_desc *)cfhsi->rx_buf;

	dev_dbg(&cfhsi->ndev->dev, "%s\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Update inactivity timer if pending. */
	spin_lock_bh(&cfhsi->lock);
	mod_timer_pending(&cfhsi->timer, jiffies + CFHSI_INACTIVITY_TOUT);
	spin_unlock_bh(&cfhsi->lock);

	if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
		desc_pld_len = cfhsi_rx_desc(desc, cfhsi, &dump);
		if (desc_pld_len == -ENOMEM)
			goto restart;
	} else {
		int pld_len;

		if (!cfhsi->rx_state.piggy_desc) {
			pld_len = cfhsi_rx_pld(desc, cfhsi, &dump);
			if (pld_len == -ENOMEM)
				goto restart;
			cfhsi->rx_state.pld_len = pld_len;
		} else {
			pld_len = cfhsi->rx_state.pld_len;
		}

		if ((pld_len > 0) && (desc->header & CFHSI_PIGGY_DESC)) {
			struct cfhsi_desc *piggy_desc;
			piggy_desc = (struct cfhsi_desc *)
				(desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
						pld_len);
			cfhsi->rx_state.piggy_desc = true;

			/* Extract piggy-backed descriptor. */
			desc_pld_len = cfhsi_rx_desc(piggy_desc, cfhsi, &dump);
			if (desc_pld_len == -ENOMEM)
				goto restart;

			/*
			 * Copy needed information from the piggy-backed
			 * descriptor to the descriptor in the start.
			 */
			memcpy((u8 *)desc, (u8 *)piggy_desc,
					CFHSI_DESC_SHORT_SZ);
		}
	}

	if (unlikely(dump)) {
		size_t rx_offset = cfhsi->rx_ptr - cfhsi->rx_buf;
		dev_err(&cfhsi->ndev->dev, "%s: RX offset: %u.\n",
			__func__, (unsigned) rx_offset);
		print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE,
				cfhsi->rx_buf, cfhsi->rx_len + rx_offset);
	}

	memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state));
	if (desc_pld_len) {
		cfhsi->rx_state.state = CFHSI_RX_STATE_PAYLOAD;
		cfhsi->rx_ptr = cfhsi->rx_buf + CFHSI_DESC_SZ;
		cfhsi->rx_len = desc_pld_len;
	} else {
		cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;
		cfhsi->rx_ptr = cfhsi->rx_buf;
		cfhsi->rx_len = CFHSI_DESC_SZ;
	}

	if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
		/* Set up new transfer. */
		dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n",
			__func__);
		res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len,
				cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: RX error %d.\n",
				__func__, res);
			cfhsi->ndev->stats.rx_errors++;
			cfhsi->ndev->stats.rx_dropped++;
		}
	}
	return;

restart:
	if (++cfhsi->rx_state.retries > CFHSI_MAX_RX_RETRIES) {
		dev_err(&cfhsi->ndev->dev, "%s: No memory available "
			"in %d iterations.\n",
			__func__, CFHSI_MAX_RX_RETRIES);
		BUG();
	}
	mod_timer(&cfhsi->rx_slowpath_timer, jiffies + 1);
}
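
/*
 * Informal summary of the RX state machine implemented above (a reading
 * aid, not additional driver logic): reception alternates between
 * CFHSI_RX_STATE_DESC, where a bare descriptor is read, and
 * CFHSI_RX_STATE_PAYLOAD, where the announced payload (possibly carrying
 * a piggy-backed descriptor at its tail) is read. A piggy-backed
 * descriptor is copied to the start of the RX buffer so the next round
 * can parse it in place.
 */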

static void cfhsi_rx_slowpath(unsigned long arg)
{
	struct cfhsi *cfhsi = (struct cfhsi *)arg;

	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	cfhsi_rx_done(cfhsi);
}

static void cfhsi_rx_done_cb(struct cfhsi_drv *drv)
{
	struct cfhsi *cfhsi;

	cfhsi = container_of(drv, struct cfhsi, drv);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits))
		wake_up_interruptible(&cfhsi->flush_fifo_wait);
	else
		cfhsi_rx_done(cfhsi);
}

static void cfhsi_wake_up(struct work_struct *work)
{
	struct cfhsi *cfhsi = NULL;
	int res;
	int len;
	long ret;

	cfhsi = container_of(work, struct cfhsi, wake_up_work);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) {
		/* This happens when wakeup is requested by
		 * both ends at the same time. */
		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		return;
	}

	/* Activate wake line. */
	cfhsi->dev->cfhsi_wake_up(cfhsi->dev);

	dev_dbg(&cfhsi->ndev->dev, "%s: Start waiting.\n",
		__func__);

	/* Wait for acknowledge. */
	ret = CFHSI_WAKE_TOUT;
	ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait,
					test_and_clear_bit(CFHSI_WAKE_UP_ACK,
							&cfhsi->bits), ret);
	if (unlikely(ret < 0)) {
		/* Interrupted by signal. */
		dev_info(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
			__func__, ret);
		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
		return;
	} else if (!ret) {
		/* Wakeup timeout */
		dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n",
			__func__);
		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
		return;
	}
	dev_dbg(&cfhsi->ndev->dev, "%s: Woken.\n",
		__func__);

	/* Clear power up bit. */
	set_bit(CFHSI_AWAKE, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);

	/* Resume read operation. */
	dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n", __func__);
	res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->dev);

	if (WARN_ON(res < 0))
		dev_err(&cfhsi->ndev->dev, "%s: RX err %d.\n", __func__, res);

	/* Clear power up acknowledgment. */
	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);

	spin_lock_bh(&cfhsi->lock);

	/* Resume transmit if queue is not empty. */
	if (!skb_peek(&cfhsi->qhead)) {
		dev_dbg(&cfhsi->ndev->dev, "%s: Peer wake, start timer.\n",
			__func__);
		/* Start inactivity timer. */
		mod_timer(&cfhsi->timer,
				jiffies + CFHSI_INACTIVITY_TOUT);
		spin_unlock_bh(&cfhsi->lock);
		return;
	}

	dev_dbg(&cfhsi->ndev->dev, "%s: Host wake.\n",
		__func__);

	spin_unlock_bh(&cfhsi->lock);

	/* Create HSI frame. */
	len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi);

	if (likely(len > 0)) {
		/* Set up new transfer. */
		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
				__func__, res);
			cfhsi_abort_tx(cfhsi);
		}
	} else {
		dev_err(&cfhsi->ndev->dev,
			"%s: Failed to create HSI frame: %d.\n",
			__func__, len);
	}
}

static void cfhsi_wake_down(struct work_struct *work)
{
	long ret;
	struct cfhsi *cfhsi = NULL;
	size_t fifo_occupancy = 0;
	int retry = CFHSI_WAKE_TOUT;

	cfhsi = container_of(work, struct cfhsi, wake_down_work);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Deactivate wake line. */
	cfhsi->dev->cfhsi_wake_down(cfhsi->dev);

	/* Wait for acknowledge. */
	ret = CFHSI_WAKE_TOUT;
	ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait,
					test_and_clear_bit(CFHSI_WAKE_DOWN_ACK,
							&cfhsi->bits), ret);
	if (ret < 0) {
		/* Interrupted by signal. */
		dev_info(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
			__func__, ret);
		return;
	} else if (!ret) {
		/* Timeout */
		dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n", __func__);
	}

	/* Check FIFO occupancy. */
	while (retry) {
		WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
							&fifo_occupancy));

		if (!fifo_occupancy)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(1);
		retry--;
	}

	if (!retry)
		dev_err(&cfhsi->ndev->dev, "%s: FIFO Timeout.\n", __func__);

	/* Clear AWAKE condition. */
	clear_bit(CFHSI_AWAKE, &cfhsi->bits);

	/* Cancel pending RX requests. */
	cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
}

static void cfhsi_wake_up_cb(struct cfhsi_drv *drv)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(drv, struct cfhsi, drv);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
	wake_up_interruptible(&cfhsi->wake_up_wait);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Schedule wake up work queue if the peer initiates. */
	if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
		queue_work(cfhsi->wq, &cfhsi->wake_up_work);
}

static void cfhsi_wake_down_cb(struct cfhsi_drv *drv)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(drv, struct cfhsi, drv);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	/* Initiating low power is only permitted by the host (us). */
	set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
	wake_up_interruptible(&cfhsi->wake_down_wait);
}
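
/*
 * Informal sketch of the power-management handshake driven by the work
 * functions and callbacks above (descriptive only, not extra logic):
 *
 *	cfhsi_wake_up():   raise the wake line, wait for CFHSI_WAKE_UP_ACK,
 *			   then set CFHSI_AWAKE and resume RX/TX.
 *	inactivity timer:  fires CFHSI_INACTIVITY_TOUT after the last
 *			   transfer and queues cfhsi_wake_down().
 *	cfhsi_wake_down(): lower the wake line, wait for
 *			   CFHSI_WAKE_DOWN_ACK, drain the FIFO and clear
 *			   CFHSI_AWAKE.
 */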

static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct cfhsi *cfhsi = NULL;
	int start_xfer = 0;
	int timer_active;

	if (!dev)
		return -EINVAL;

	cfhsi = netdev_priv(dev);

	spin_lock_bh(&cfhsi->lock);

	skb_queue_tail(&cfhsi->qhead, skb);

	/* Sanity check; xmit should not be called after unregister_netdev */
	if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
		spin_unlock_bh(&cfhsi->lock);
		cfhsi_abort_tx(cfhsi);
		return -EINVAL;
	}

	/* Send flow off if number of packets is above high water mark. */
	if (!cfhsi->flow_off_sent &&
		cfhsi->qhead.qlen > cfhsi->q_high_mark &&
		cfhsi->cfdev.flowctrl) {
		cfhsi->flow_off_sent = 1;
		cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
	}

	if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) {
		cfhsi->tx_state = CFHSI_TX_STATE_XFER;
		start_xfer = 1;
	}

	if (!start_xfer) {
		spin_unlock_bh(&cfhsi->lock);
		return 0;
	}

	/* Delete inactivity timer if started. */
	timer_active = del_timer_sync(&cfhsi->timer);

	spin_unlock_bh(&cfhsi->lock);

	if (timer_active) {
		struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
		int len;
		int res;

		/* Create HSI frame. */
		len = cfhsi_tx_frm(desc, cfhsi);
		BUG_ON(!len);

		/* Set up new transfer. */
		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
				__func__, res);
			cfhsi_abort_tx(cfhsi);
		}
	} else {
		/* Schedule wake up work queue if we initiate. */
		if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
			queue_work(cfhsi->wq, &cfhsi->wake_up_work);
	}

	return 0;
}
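
/*
 * Flow-control hysteresis illustrated with the default module parameters
 * (an informal example, not extra logic): with hsi_high_threshold = 100
 * and hsi_low_threshold = 50, queueing the 101st packet in cfhsi_xmit()
 * sends flowctrl(ndev, OFF), and flowctrl(ndev, ON) is not sent again
 * until cfhsi_tx_done() has drained the queue to 50 packets or fewer.
 */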

static int cfhsi_open(struct net_device *dev)
{
	netif_wake_queue(dev);

	return 0;
}

static int cfhsi_close(struct net_device *dev)
{
	netif_stop_queue(dev);

	return 0;
}

static const struct net_device_ops cfhsi_ops = {
	.ndo_open = cfhsi_open,
	.ndo_stop = cfhsi_close,
	.ndo_start_xmit = cfhsi_xmit
};

static void cfhsi_setup(struct net_device *dev)
{
	struct cfhsi *cfhsi = netdev_priv(dev);
	dev->features = 0;
	dev->netdev_ops = &cfhsi_ops;
	dev->type = ARPHRD_CAIF;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
	dev->mtu = CFHSI_MAX_PAYLOAD_SZ;
	dev->tx_queue_len = 0;
	dev->destructor = free_netdev;
	skb_queue_head_init(&cfhsi->qhead);
	cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
	cfhsi->cfdev.use_frag = false;
	cfhsi->cfdev.use_stx = false;
	cfhsi->cfdev.use_fcs = false;
	cfhsi->ndev = dev;
}

int cfhsi_probe(struct platform_device *pdev)
{
	struct cfhsi *cfhsi = NULL;
	struct net_device *ndev;
	struct cfhsi_dev *dev;
	int res;

	ndev = alloc_netdev(sizeof(struct cfhsi), "cfhsi%d", cfhsi_setup);
	if (!ndev)
		return -ENODEV;

	cfhsi = netdev_priv(ndev);
	cfhsi->ndev = ndev;
	cfhsi->pdev = pdev;

	/* Initialize state variables. */
	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
	cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;

	/* Set flow info */
	cfhsi->flow_off_sent = 0;
	cfhsi->q_low_mark = LOW_WATER_MARK;
	cfhsi->q_high_mark = HIGH_WATER_MARK;

	/* Assign the HSI device. */
	dev = (struct cfhsi_dev *)pdev->dev.platform_data;
	cfhsi->dev = dev;

	/* Assign the driver to this HSI device. */
	dev->drv = &cfhsi->drv;

	/*
	 * Allocate a TX buffer with the size of an HSI packet descriptor
	 * and the necessary room for CAIF payload frames.
	 */
	cfhsi->tx_buf = kzalloc(CFHSI_BUF_SZ_TX, GFP_KERNEL);
	if (!cfhsi->tx_buf) {
		res = -ENODEV;
		goto err_alloc_tx;
	}

	/*
	 * Allocate a RX buffer with the size of two HSI packet descriptors and
	 * the necessary room for CAIF payload frames.
	 */
	cfhsi->rx_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
	if (!cfhsi->rx_buf) {
		res = -ENODEV;
		goto err_alloc_rx;
	}

	/* Initialize receive variables. */
	cfhsi->rx_ptr = cfhsi->rx_buf;
	cfhsi->rx_len = CFHSI_DESC_SZ;

	/* Initialize spin locks. */
	spin_lock_init(&cfhsi->lock);

	/* Set up the driver. */
	cfhsi->drv.tx_done_cb = cfhsi_tx_done_cb;
	cfhsi->drv.rx_done_cb = cfhsi_rx_done_cb;
	cfhsi->drv.wake_up_cb = cfhsi_wake_up_cb;
	cfhsi->drv.wake_down_cb = cfhsi_wake_down_cb;

	/* Initialize the work queues. */
	INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
	INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down);

	/* Clear all bit fields. */
	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
	clear_bit(CFHSI_AWAKE, &cfhsi->bits);

	/* Create work thread. */
	cfhsi->wq = create_singlethread_workqueue(pdev->name);
	if (!cfhsi->wq) {
		dev_err(&ndev->dev, "%s: Failed to create work queue.\n",
			__func__);
		res = -ENODEV;
		goto err_create_wq;
	}

	/* Initialize wait queues. */
	init_waitqueue_head(&cfhsi->wake_up_wait);
	init_waitqueue_head(&cfhsi->wake_down_wait);
	init_waitqueue_head(&cfhsi->flush_fifo_wait);

	/* Setup the inactivity timer. */
	init_timer(&cfhsi->timer);
	cfhsi->timer.data = (unsigned long)cfhsi;
	cfhsi->timer.function = cfhsi_inactivity_tout;
	/* Setup the slowpath RX timer. */
	init_timer(&cfhsi->rx_slowpath_timer);
	cfhsi->rx_slowpath_timer.data = (unsigned long)cfhsi;
	cfhsi->rx_slowpath_timer.function = cfhsi_rx_slowpath;

	/* Add CAIF HSI device to list. */
	spin_lock(&cfhsi_list_lock);
	list_add_tail(&cfhsi->list, &cfhsi_list);
	spin_unlock(&cfhsi_list_lock);

	/* Activate HSI interface. */
	res = cfhsi->dev->cfhsi_up(cfhsi->dev);
	if (res) {
		dev_err(&cfhsi->ndev->dev,
			"%s: can't activate HSI interface: %d.\n",
			__func__, res);
		goto err_activate;
	}

	/* Flush FIFO */
	res = cfhsi_flush_fifo(cfhsi);
	if (res) {
		dev_err(&ndev->dev, "%s: Can't flush FIFO: %d.\n",
			__func__, res);
		goto err_net_reg;
	}

	/* Register network device. */
	res = register_netdev(ndev);
	if (res) {
		dev_err(&ndev->dev, "%s: Registration error: %d.\n",
			__func__, res);
		goto err_net_reg;
	}

	netif_stop_queue(ndev);

	return res;

 err_net_reg:
	cfhsi->dev->cfhsi_down(cfhsi->dev);
 err_activate:
	destroy_workqueue(cfhsi->wq);
 err_create_wq:
	kfree(cfhsi->rx_buf);
 err_alloc_rx:
	kfree(cfhsi->tx_buf);
 err_alloc_tx:
	free_netdev(ndev);

	return res;
}
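
/*
 * Binding sketch (an illustrative assumption about the surrounding
 * platform code, not defined in this file): a board file registers a
 * platform device named "cfhsi" whose platform_data points to a struct
 * cfhsi_dev with the cfhsi_up/cfhsi_down/cfhsi_tx/cfhsi_rx, wake and FIFO
 * callbacks filled in. cfhsi_probe() above then points dev->drv at
 * &cfhsi->drv so the low-level HSI driver can deliver tx_done/rx_done and
 * wake events back to this module.
 */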

static void cfhsi_shutdown(struct cfhsi *cfhsi, bool remove_platform_dev)
{
	u8 *tx_buf, *rx_buf;

	/* Stop TXing */
	netif_tx_stop_all_queues(cfhsi->ndev);

	/* going to shutdown driver */
	set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);

	if (remove_platform_dev) {
		/* Flush workqueue */
		flush_workqueue(cfhsi->wq);

		/* Notify device. */
		platform_device_unregister(cfhsi->pdev);
	}

	/* Flush workqueue */
	flush_workqueue(cfhsi->wq);

	/* Delete timers if pending */
	del_timer_sync(&cfhsi->timer);
	del_timer_sync(&cfhsi->rx_slowpath_timer);

	/* Cancel pending RX request (if any) */
	cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);

	/* Flush again and destroy workqueue */
	destroy_workqueue(cfhsi->wq);

	/* Store buffers: will be freed later. */
	tx_buf = cfhsi->tx_buf;
	rx_buf = cfhsi->rx_buf;

	/* Flush transmit queues. */
	cfhsi_abort_tx(cfhsi);

	/* Deactivate interface */
	cfhsi->dev->cfhsi_down(cfhsi->dev);

	/* Finally unregister the network device. */
	unregister_netdev(cfhsi->ndev);

	/* Free buffers. */
	kfree(tx_buf);
	kfree(rx_buf);
}

int cfhsi_remove(struct platform_device *pdev)
{
	struct list_head *list_node;
	struct list_head *n;
	struct cfhsi *cfhsi = NULL;
	struct cfhsi_dev *dev;

	dev = (struct cfhsi_dev *)pdev->dev.platform_data;
	spin_lock(&cfhsi_list_lock);
	list_for_each_safe(list_node, n, &cfhsi_list) {
		cfhsi = list_entry(list_node, struct cfhsi, list);
		/* Find the corresponding device. */
		if (cfhsi->dev == dev) {
			/* Remove from list. */
			list_del(list_node);
			spin_unlock(&cfhsi_list_lock);

			/* Shutdown driver. */
			cfhsi_shutdown(cfhsi, false);

			return 0;
		}
	}
	spin_unlock(&cfhsi_list_lock);
	return -ENODEV;
}

struct platform_driver cfhsi_plat_drv = {
	.probe = cfhsi_probe,
	.remove = cfhsi_remove,
	.driver = {
		.name = "cfhsi",
		.owner = THIS_MODULE,
	},
};

static void __exit cfhsi_exit_module(void)
{
	struct list_head *list_node;
	struct list_head *n;
	struct cfhsi *cfhsi = NULL;

	spin_lock(&cfhsi_list_lock);
	list_for_each_safe(list_node, n, &cfhsi_list) {
		cfhsi = list_entry(list_node, struct cfhsi, list);

		/* Remove from list. */
		list_del(list_node);
		spin_unlock(&cfhsi_list_lock);

		/* Shutdown driver. */
		cfhsi_shutdown(cfhsi, true);

		spin_lock(&cfhsi_list_lock);
	}
	spin_unlock(&cfhsi_list_lock);

	/* Unregister platform driver. */
	platform_driver_unregister(&cfhsi_plat_drv);
}

static int __init cfhsi_init_module(void)
{
	int result;

	/* Initialize spin lock. */
	spin_lock_init(&cfhsi_list_lock);

	/* Register platform driver. */
	result = platform_driver_register(&cfhsi_plat_drv);
	if (result) {
		printk(KERN_ERR "Could not register platform HSI driver: %d.\n",
			result);
		goto err_dev_register;
	}

	return result;

 err_dev_register:
	return result;
}

module_init(cfhsi_init_module);
module_exit(cfhsi_exit_module);