/*
 * Copyright (C) ST-Ericsson AB 2010
 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
 * Author:  Daniel Martensson / daniel.martensson@stericsson.com
 *	    Dmitry.Tarnyagin  / dmitry.tarnyagin@stericsson.com
 * License terms: GNU General Public License (GPL) version 2.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/if_arp.h>
#include <linux/timer.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_hsi.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>");
MODULE_DESCRIPTION("CAIF HSI driver");

/* Returns the number of padding bytes for alignment. */
#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
				(((pow)-((x)&((pow)-1)))))
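
/*
 * Illustrative examples (not in the original source): with an alignment
 * of 4, PAD_POW2(5, 4) evaluates to 3 (5 + 3 is 4-byte aligned) and
 * PAD_POW2(8, 4) evaluates to 0 (already aligned).
 */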

static int inactivity_timeout = 1000;
module_param(inactivity_timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(inactivity_timeout, "Inactivity timeout on HSI, ms.");

/*
 * HSI padding options.
 * Warning: must be a power of 2 (& operation used) and cannot be zero!
 */
static int hsi_head_align = 4;
module_param(hsi_head_align, int, S_IRUGO);
MODULE_PARM_DESC(hsi_head_align, "HSI head alignment.");

static int hsi_tail_align = 4;
module_param(hsi_tail_align, int, S_IRUGO);
MODULE_PARM_DESC(hsi_tail_align, "HSI tail alignment.");

/*
 * HSI link layer flow-control thresholds.
 * Warning: A high threshold value might increase throughput but it will at
 * the same time prevent channel prioritization and increase the risk of
 * flooding the modem. The high threshold should be above the low.
 */
static int hsi_high_threshold = 100;
module_param(hsi_high_threshold, int, S_IRUGO);
MODULE_PARM_DESC(hsi_high_threshold, "HSI high threshold (FLOW OFF).");

static int hsi_low_threshold = 50;
module_param(hsi_low_threshold, int, S_IRUGO);
MODULE_PARM_DESC(hsi_low_threshold, "HSI low threshold (FLOW ON).");

#define ON 1
#define OFF 0

/*
 * Threshold values for the HSI packet queue. Flow control will be asserted
 * when the number of packets exceeds HIGH_WATER_MARK. It will not be
 * de-asserted before the number of packets drops below LOW_WATER_MARK.
 */
#define LOW_WATER_MARK   hsi_low_threshold
#define HIGH_WATER_MARK  hsi_high_threshold

static LIST_HEAD(cfhsi_list);
static spinlock_t cfhsi_list_lock;

static void cfhsi_inactivity_tout(unsigned long arg)
{
	struct cfhsi *cfhsi = (struct cfhsi *)arg;

	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	/* Schedule power down work queue. */
	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		queue_work(cfhsi->wq, &cfhsi->wake_down_work);
}

static void cfhsi_abort_tx(struct cfhsi *cfhsi)
{
	struct sk_buff *skb;

	for (;;) {
		spin_lock_bh(&cfhsi->lock);
		skb = skb_dequeue(&cfhsi->qhead);
		if (!skb)
			break;

		cfhsi->ndev->stats.tx_errors++;
		cfhsi->ndev->stats.tx_dropped++;
		spin_unlock_bh(&cfhsi->lock);
		kfree_skb(skb);
	}
	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		mod_timer(&cfhsi->timer,
			  jiffies + cfhsi->inactivity_timeout);
	spin_unlock_bh(&cfhsi->lock);
}

static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
{
	char buffer[32]; /* Any reasonable value */
	size_t fifo_occupancy;
	int ret;

	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	ret = cfhsi->dev->cfhsi_wake_up(cfhsi->dev);
	if (ret) {
		dev_warn(&cfhsi->ndev->dev,
			"%s: can't wake up HSI interface: %d.\n",
			__func__, ret);
		return ret;
	}

	do {
		ret = cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
				&fifo_occupancy);
		if (ret) {
			dev_warn(&cfhsi->ndev->dev,
				"%s: can't get FIFO occupancy: %d.\n",
				__func__, ret);
			break;
		} else if (!fifo_occupancy)
			/* No more data, exiting normally */
			break;

		fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
		set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
		ret = cfhsi->dev->cfhsi_rx(buffer, fifo_occupancy,
				cfhsi->dev);
		if (ret) {
			clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
			dev_warn(&cfhsi->ndev->dev,
				"%s: can't read data: %d.\n",
				__func__, ret);
			break;
		}

		ret = 5 * HZ;
		ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
			 !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);

		if (ret < 0) {
			dev_warn(&cfhsi->ndev->dev,
				"%s: can't wait for flush complete: %d.\n",
				__func__, ret);
			break;
		} else if (!ret) {
			ret = -ETIMEDOUT;
			dev_warn(&cfhsi->ndev->dev,
				"%s: timeout waiting for flush complete.\n",
				__func__);
			break;
		}
	} while (1);

	cfhsi->dev->cfhsi_wake_down(cfhsi->dev);

	return ret;
}
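
/*
 * Note (added for clarity): the flush handshake above is completed by
 * cfhsi_rx_done_cb() below; while CFHSI_FLUSH_FIFO is set, a finished
 * read wakes flush_fifo_wait instead of entering the normal RX path.
 */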

static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int nfrms = 0;
	int pld_len = 0;
	struct sk_buff *skb;
	u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;

	skb = skb_dequeue(&cfhsi->qhead);
	if (!skb)
		return 0;

	/* Clear offset. */
	desc->offset = 0;

	/* Check if we can embed a CAIF frame. */
	if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
		struct caif_payload_info *info;
		int hpad = 0;
		int tpad = 0;

		/* Calculate needed head alignment and tail alignment. */
		info = (struct caif_payload_info *)&skb->cb;

		hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
		tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);

		/* Check if frame still fits with added alignment. */
		if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
			u8 *pemb = desc->emb_frm;
			desc->offset = CFHSI_DESC_SHORT_SZ;
			*pemb = (u8)(hpad - 1);
			pemb += hpad;

			/* Update network statistics. */
			cfhsi->ndev->stats.tx_packets++;
			cfhsi->ndev->stats.tx_bytes += skb->len;

			/* Copy in embedded CAIF frame. */
			skb_copy_bits(skb, 0, pemb, skb->len);
			consume_skb(skb);
			skb = NULL;
		}
	}

	/* Create payload CAIF frames. */
	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
	while (nfrms < CFHSI_MAX_PKTS) {
		struct caif_payload_info *info;
		int hpad = 0;
		int tpad = 0;

		if (!skb)
			skb = skb_dequeue(&cfhsi->qhead);

		if (!skb)
			break;

		/* Calculate needed head alignment and tail alignment. */
		info = (struct caif_payload_info *)&skb->cb;

		hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
		tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);

		/* Fill in CAIF frame length in descriptor. */
		desc->cffrm_len[nfrms] = hpad + skb->len + tpad;

		/* Fill head padding information. */
		*pfrm = (u8)(hpad - 1);
		pfrm += hpad;

		/* Update network statistics. */
		cfhsi->ndev->stats.tx_packets++;
		cfhsi->ndev->stats.tx_bytes += skb->len;

		/* Copy in CAIF frame. */
		skb_copy_bits(skb, 0, pfrm, skb->len);

		/* Update payload length. */
		pld_len += desc->cffrm_len[nfrms];

		/* Update frame pointer. */
		pfrm += skb->len + tpad;
		consume_skb(skb);
		skb = NULL;

		/* Update number of frames. */
		nfrms++;
	}

	/* Unused length fields should be zero-filled (according to SPEC). */
	while (nfrms < CFHSI_MAX_PKTS) {
		desc->cffrm_len[nfrms] = 0x0000;
		nfrms++;
	}

	/* Check if we can piggy-back another descriptor. */
	skb = skb_peek(&cfhsi->qhead);
	if (skb)
		desc->header |= CFHSI_PIGGY_DESC;
	else
		desc->header &= ~CFHSI_PIGGY_DESC;

	return CFHSI_DESC_SZ + pld_len;
}
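
/*
 * Sketch of the TX buffer produced above (derived from the code, added
 * for clarity):
 *
 *   [descriptor: header | offset | cffrm_len[CFHSI_MAX_PKTS] | emb_frm]
 *   [frame 0: pad byte holding (hpad - 1) | head padding | CAIF frame | tail padding]
 *   [frame 1: ...]
 *
 * An embedded frame, if any, uses the same "pad byte + padding + frame"
 * encoding inside emb_frm, with desc->offset pointing at it.
 */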

static void cfhsi_tx_done(struct cfhsi *cfhsi)
{
	struct cfhsi_desc *desc = NULL;
	int len = 0;
	int res;

	dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	desc = (struct cfhsi_desc *)cfhsi->tx_buf;

	do {
		/*
		 * Send flow on if flow off has been previously signalled
		 * and number of packets is below low water mark.
		 */
		spin_lock_bh(&cfhsi->lock);
		if (cfhsi->flow_off_sent &&
			cfhsi->qhead.qlen <= cfhsi->q_low_mark &&
			cfhsi->cfdev.flowctrl) {

			cfhsi->flow_off_sent = 0;
			cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
		}
		spin_unlock_bh(&cfhsi->lock);

		/* Create HSI frame. */
		do {
			len = cfhsi_tx_frm(desc, cfhsi);
			if (!len) {
				spin_lock_bh(&cfhsi->lock);
				if (unlikely(skb_peek(&cfhsi->qhead))) {
					spin_unlock_bh(&cfhsi->lock);
					continue;
				}
				cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
				/* Start inactivity timer. */
				mod_timer(&cfhsi->timer,
					jiffies + cfhsi->inactivity_timeout);
				spin_unlock_bh(&cfhsi->lock);
				goto done;
			}
		} while (!len);

		/* Set up new transfer. */
		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
				__func__, res);
		}
	} while (res < 0);

done:
	return;
}

static void cfhsi_tx_done_cb(struct cfhsi_drv *drv)
{
	struct cfhsi *cfhsi;

	cfhsi = container_of(drv, struct cfhsi, drv);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;
	cfhsi_tx_done(cfhsi);
}

static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi,
			 bool *dump)
{
	int xfer_sz = 0;
	int nfrms = 0;
	u16 *plen = NULL;
	u8 *pfrm = NULL;

	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
		dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
			__func__);
		*dump = true;
		return 0;
	}

	/* Check for embedded CAIF frame. */
	if (desc->offset) {
		struct sk_buff *skb;
		u8 *dst = NULL;
		int len = 0;
		pfrm = ((u8 *)desc) + desc->offset;

		/* Remove offset padding. */
		pfrm += *pfrm + 1;

		/* Read length of CAIF frame (little endian). */
		len = *pfrm;
		len |= ((*(pfrm+1)) << 8) & 0xFF00;
		len += 2;	/* Add FCS fields. */

		/* Allocate SKB (OK even in IRQ context). */
		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			dev_err(&cfhsi->ndev->dev, "%s: Out of memory!\n",
				__func__);
			return -ENOMEM;
		}
		caif_assert(skb != NULL);

		dst = skb_put(skb, len);
		memcpy(dst, pfrm, len);

		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfhsi->ndev;

		/*
		 * We are called from an arch-specific platform device.
		 * Unfortunately we don't know what context we're
		 * running in.
		 */
		if (in_interrupt())
			netif_rx(skb);
		else
			netif_rx_ni(skb);

		/* Update network statistics. */
		cfhsi->ndev->stats.rx_packets++;
		cfhsi->ndev->stats.rx_bytes += len;
	}

	/* Calculate transfer length. */
	plen = desc->cffrm_len;
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		xfer_sz += *plen;
		plen++;
		nfrms++;
	}

	/* Check for piggy-backed descriptor. */
	if (desc->header & CFHSI_PIGGY_DESC)
		xfer_sz += CFHSI_DESC_SZ;

	if (xfer_sz % 4) {
		dev_err(&cfhsi->ndev->dev,
			"%s: Invalid payload len: %d, ignored.\n",
			__func__, xfer_sz);
		xfer_sz = 0;
		*dump = true;
	}
	return xfer_sz;
}
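
/*
 * Worked example of the length parse above (illustrative): a frame
 * beginning with the bytes 0x05 0x00 announces a 5-byte CAIF frame,
 * so len becomes 5 + 2 == 7 once the FCS fields are added.
 */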

static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi,
			bool *dump)
{
	int rx_sz = 0;
	int nfrms = 0;
	u16 *plen = NULL;
	u8 *pfrm = NULL;

	/* Sanity check header and offset. */
	if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
		dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
			__func__);
		*dump = true;
		return -EINVAL;
	}

	/* Set frame pointer to start of payload. */
	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
	plen = desc->cffrm_len;

	/* Skip already processed frames. */
	while (nfrms < cfhsi->rx_state.nfrms) {
		pfrm += *plen;
		rx_sz += *plen;
		plen++;
		nfrms++;
	}

	/* Parse payload. */
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		struct sk_buff *skb;
		u8 *dst = NULL;
		u8 *pcffrm = NULL;
		int len = 0;

		if (WARN_ON(desc->cffrm_len[nfrms] > CFHSI_MAX_PAYLOAD_SZ)) {
			dev_err(&cfhsi->ndev->dev, "%s: Invalid payload.\n",
				__func__);
			*dump = true;
			return -EINVAL;
		}

		/* CAIF frame starts after head padding. */
		pcffrm = pfrm + *pfrm + 1;

		/* Read length of CAIF frame (little endian). */
		len = *pcffrm;
		len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
		len += 2;	/* Add FCS fields. */

		/* Allocate SKB (OK even in IRQ context). */
		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			dev_err(&cfhsi->ndev->dev, "%s: Out of memory!\n",
				__func__);
			cfhsi->rx_state.nfrms = nfrms;
			return -ENOMEM;
		}
		caif_assert(skb != NULL);

		dst = skb_put(skb, len);
		memcpy(dst, pcffrm, len);

		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfhsi->ndev;

		/*
		 * We're called from a platform device,
		 * and don't know the context we're running in.
		 */
		if (in_interrupt())
			netif_rx(skb);
		else
			netif_rx_ni(skb);

		/* Update network statistics. */
		cfhsi->ndev->stats.rx_packets++;
		cfhsi->ndev->stats.rx_bytes += len;

		pfrm += *plen;
		rx_sz += *plen;
		plen++;
		nfrms++;
	}

	return rx_sz;
}

static void cfhsi_rx_done(struct cfhsi *cfhsi)
{
	int res;
	int desc_pld_len = 0;
	struct cfhsi_desc *desc = NULL;
	bool dump = false;

	desc = (struct cfhsi_desc *)cfhsi->rx_buf;

	dev_dbg(&cfhsi->ndev->dev, "%s\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Update inactivity timer if pending. */
	spin_lock_bh(&cfhsi->lock);
	mod_timer_pending(&cfhsi->timer,
			jiffies + cfhsi->inactivity_timeout);
	spin_unlock_bh(&cfhsi->lock);

	if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
		desc_pld_len = cfhsi_rx_desc(desc, cfhsi, &dump);
		if (desc_pld_len == -ENOMEM)
			goto restart;
	} else {
		int pld_len;

		if (!cfhsi->rx_state.piggy_desc) {
			pld_len = cfhsi_rx_pld(desc, cfhsi, &dump);
			if (pld_len == -ENOMEM)
				goto restart;
			cfhsi->rx_state.pld_len = pld_len;
		} else {
			pld_len = cfhsi->rx_state.pld_len;
		}

		if ((pld_len > 0) && (desc->header & CFHSI_PIGGY_DESC)) {
			struct cfhsi_desc *piggy_desc;
			piggy_desc = (struct cfhsi_desc *)
				(desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
						pld_len);
			cfhsi->rx_state.piggy_desc = true;

			/* Extract piggy-backed descriptor. */
			desc_pld_len = cfhsi_rx_desc(piggy_desc, cfhsi, &dump);
			if (desc_pld_len == -ENOMEM)
				goto restart;

			/*
			 * Copy needed information from the piggy-backed
			 * descriptor to the descriptor at the start.
			 */
			memcpy((u8 *)desc, (u8 *)piggy_desc,
					CFHSI_DESC_SHORT_SZ);
		}
	}

	if (unlikely(dump)) {
		size_t rx_offset = cfhsi->rx_ptr - cfhsi->rx_buf;
		dev_err(&cfhsi->ndev->dev, "%s: RX offset: %u.\n",
			__func__, (unsigned) rx_offset);
		print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE,
				cfhsi->rx_buf, cfhsi->rx_len + rx_offset);
	}

	memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state));
	if (desc_pld_len) {
		cfhsi->rx_state.state = CFHSI_RX_STATE_PAYLOAD;
		cfhsi->rx_ptr = cfhsi->rx_buf + CFHSI_DESC_SZ;
		cfhsi->rx_len = desc_pld_len;
	} else {
		cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;
		cfhsi->rx_ptr = cfhsi->rx_buf;
		cfhsi->rx_len = CFHSI_DESC_SZ;
	}

	if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
		/* Set up new transfer. */
		dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n",
			__func__);
		res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len,
				cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: RX error %d.\n",
				__func__, res);
			cfhsi->ndev->stats.rx_errors++;
			cfhsi->ndev->stats.rx_dropped++;
		}
	}
	return;

restart:
	if (++cfhsi->rx_state.retries > CFHSI_MAX_RX_RETRIES) {
		dev_err(&cfhsi->ndev->dev, "%s: No memory available "
			"in %d iterations.\n",
			__func__, CFHSI_MAX_RX_RETRIES);
		BUG();
	}
	mod_timer(&cfhsi->rx_slowpath_timer, jiffies + 1);
}
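
/*
 * RX state machine summary (added for clarity): cfhsi_rx_done() alternates
 * between reading a descriptor (CFHSI_DESC_SZ bytes, CFHSI_RX_STATE_DESC)
 * and reading the payload it announces (CFHSI_RX_STATE_PAYLOAD). A
 * piggy-backed descriptor at the end of a payload transfer is parsed in
 * place, so the next payload can be requested without an extra
 * descriptor-only transfer.
 */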

static void cfhsi_rx_slowpath(unsigned long arg)
{
	struct cfhsi *cfhsi = (struct cfhsi *)arg;

	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	cfhsi_rx_done(cfhsi);
}

static void cfhsi_rx_done_cb(struct cfhsi_drv *drv)
{
	struct cfhsi *cfhsi;

	cfhsi = container_of(drv, struct cfhsi, drv);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits))
		wake_up_interruptible(&cfhsi->flush_fifo_wait);
	else
		cfhsi_rx_done(cfhsi);
}

static void cfhsi_wake_up(struct work_struct *work)
{
	struct cfhsi *cfhsi = NULL;
	int res;
	int len;
	long ret;

	cfhsi = container_of(work, struct cfhsi, wake_up_work);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) {
		/* This happens when wakeup is requested by
		 * both ends at the same time. */
		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		return;
	}

	/* Activate wake line. */
	cfhsi->dev->cfhsi_wake_up(cfhsi->dev);

	dev_dbg(&cfhsi->ndev->dev, "%s: Start waiting.\n",
		__func__);

	/* Wait for acknowledge. */
	ret = CFHSI_WAKE_TOUT;
	ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait,
					test_and_clear_bit(CFHSI_WAKE_UP_ACK,
							&cfhsi->bits), ret);
	if (unlikely(ret < 0)) {
		/* Interrupted by signal. */
		dev_info(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
			__func__, ret);
		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
		return;
	} else if (!ret) {
		/* Wakeup timeout */
		dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n",
			__func__);
		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
		return;
	}
	dev_dbg(&cfhsi->ndev->dev, "%s: Woken.\n",
		__func__);

	/* Mark the interface as awake and clear the wake-up request. */
	set_bit(CFHSI_AWAKE, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);

	/* Resume read operation. */
	dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n", __func__);
	res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->dev);

	if (WARN_ON(res < 0))
		dev_err(&cfhsi->ndev->dev, "%s: RX err %d.\n", __func__, res);

	/* Clear power up acknowledgment. */
	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);

	spin_lock_bh(&cfhsi->lock);

	/* Resume transmit if queue is not empty. */
	if (!skb_peek(&cfhsi->qhead)) {
		dev_dbg(&cfhsi->ndev->dev, "%s: Peer wake, start timer.\n",
			__func__);
		/* Start inactivity timer. */
		mod_timer(&cfhsi->timer,
				jiffies + cfhsi->inactivity_timeout);
		spin_unlock_bh(&cfhsi->lock);
		return;
	}

	dev_dbg(&cfhsi->ndev->dev, "%s: Host wake.\n",
		__func__);

	spin_unlock_bh(&cfhsi->lock);

	/* Create HSI frame. */
	len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi);

	if (likely(len > 0)) {
		/* Set up new transfer. */
		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
				__func__, res);
			cfhsi_abort_tx(cfhsi);
		}
	} else {
		dev_err(&cfhsi->ndev->dev,
			"%s: Failed to create HSI frame: %d.\n",
			__func__, len);
	}
}

static void cfhsi_wake_down(struct work_struct *work)
{
	long ret;
	struct cfhsi *cfhsi = NULL;
	size_t fifo_occupancy = 0;
	int retry = CFHSI_WAKE_TOUT;

	cfhsi = container_of(work, struct cfhsi, wake_down_work);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Deactivate wake line. */
	cfhsi->dev->cfhsi_wake_down(cfhsi->dev);

	/* Wait for acknowledge. */
	ret = CFHSI_WAKE_TOUT;
	ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait,
					test_and_clear_bit(CFHSI_WAKE_DOWN_ACK,
							&cfhsi->bits), ret);
	if (ret < 0) {
		/* Interrupted by signal. */
		dev_info(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
			__func__, ret);
		return;
	} else if (!ret) {
		/* Timeout */
		dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n", __func__);
	}

	/* Check FIFO occupancy. */
	while (retry) {
		WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
							&fifo_occupancy));

		if (!fifo_occupancy)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(1);
		retry--;
	}

	if (!retry)
		dev_err(&cfhsi->ndev->dev, "%s: FIFO Timeout.\n", __func__);

	/* Clear AWAKE condition. */
	clear_bit(CFHSI_AWAKE, &cfhsi->bits);

	/* Cancel pending RX requests. */
	cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
}

static void cfhsi_wake_up_cb(struct cfhsi_drv *drv)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(drv, struct cfhsi, drv);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
	wake_up_interruptible(&cfhsi->wake_up_wait);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Schedule wake up work queue if the peer initiates. */
	if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
		queue_work(cfhsi->wq, &cfhsi->wake_up_work);
}

static void cfhsi_wake_down_cb(struct cfhsi_drv *drv)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(drv, struct cfhsi, drv);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	/* Initiating low power is only permitted by the host (us). */
	set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
	wake_up_interruptible(&cfhsi->wake_down_wait);
}

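/*
 * TX entry point (summary added for clarity): if the inactivity timer was
 * running, the link is known to be awake and a transfer is started
 * directly; otherwise the wake-up worker is scheduled and transmission
 * resumes from cfhsi_wake_up().
 */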
static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct cfhsi *cfhsi = NULL;
	int start_xfer = 0;
	int timer_active;

	if (!dev)
		return -EINVAL;

	cfhsi = netdev_priv(dev);

	spin_lock_bh(&cfhsi->lock);

	skb_queue_tail(&cfhsi->qhead, skb);

	/* Sanity check; xmit should not be called after unregister_netdev */
	if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
		spin_unlock_bh(&cfhsi->lock);
		cfhsi_abort_tx(cfhsi);
		return -EINVAL;
	}

	/* Send flow off if number of packets is above high water mark. */
	if (!cfhsi->flow_off_sent &&
		cfhsi->qhead.qlen > cfhsi->q_high_mark &&
		cfhsi->cfdev.flowctrl) {
		cfhsi->flow_off_sent = 1;
		cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
	}

	if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) {
		cfhsi->tx_state = CFHSI_TX_STATE_XFER;
		start_xfer = 1;
	}

	if (!start_xfer) {
		spin_unlock_bh(&cfhsi->lock);
		return 0;
	}

	/* Delete inactivity timer if started. */
	timer_active = del_timer_sync(&cfhsi->timer);

	spin_unlock_bh(&cfhsi->lock);

	if (timer_active) {
		struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
		int len;
		int res;

		/* Create HSI frame. */
		len = cfhsi_tx_frm(desc, cfhsi);
		BUG_ON(!len);

		/* Set up new transfer. */
		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
				__func__, res);
			cfhsi_abort_tx(cfhsi);
		}
	} else {
		/* Schedule wake up work queue if we initiate. */
		if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
			queue_work(cfhsi->wq, &cfhsi->wake_up_work);
	}

	return 0;
}

static int cfhsi_open(struct net_device *dev)
{
	netif_wake_queue(dev);

	return 0;
}

static int cfhsi_close(struct net_device *dev)
{
	netif_stop_queue(dev);

	return 0;
}

static const struct net_device_ops cfhsi_ops = {
	.ndo_open = cfhsi_open,
	.ndo_stop = cfhsi_close,
	.ndo_start_xmit = cfhsi_xmit
};

static void cfhsi_setup(struct net_device *dev)
{
	struct cfhsi *cfhsi = netdev_priv(dev);
	dev->features = 0;
	dev->netdev_ops = &cfhsi_ops;
	dev->type = ARPHRD_CAIF;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
	dev->mtu = CFHSI_MAX_PAYLOAD_SZ;
	dev->tx_queue_len = 0;
	dev->destructor = free_netdev;
	skb_queue_head_init(&cfhsi->qhead);
	cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
	cfhsi->cfdev.use_frag = false;
	cfhsi->cfdev.use_stx = false;
	cfhsi->cfdev.use_fcs = false;
	cfhsi->ndev = dev;
}

int cfhsi_probe(struct platform_device *pdev)
{
	struct cfhsi *cfhsi = NULL;
	struct net_device *ndev;
	struct cfhsi_dev *dev;
	int res;

	ndev = alloc_netdev(sizeof(struct cfhsi), "cfhsi%d", cfhsi_setup);
	if (!ndev)
		return -ENODEV;

	cfhsi = netdev_priv(ndev);
	cfhsi->ndev = ndev;
	cfhsi->pdev = pdev;

	/* Initialize state variables. */
	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
	cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;

	/* Set flow info. */
	cfhsi->flow_off_sent = 0;
	cfhsi->q_low_mark = LOW_WATER_MARK;
	cfhsi->q_high_mark = HIGH_WATER_MARK;

	/* Assign the HSI device. */
	dev = (struct cfhsi_dev *)pdev->dev.platform_data;
	cfhsi->dev = dev;

	/* Assign the driver to this HSI device. */
	dev->drv = &cfhsi->drv;

	/*
	 * Allocate a TX buffer with the size of an HSI packet descriptor
	 * and the necessary room for CAIF payload frames.
	 */
	cfhsi->tx_buf = kzalloc(CFHSI_BUF_SZ_TX, GFP_KERNEL);
	if (!cfhsi->tx_buf) {
		res = -ENODEV;
		goto err_alloc_tx;
	}

	/*
	 * Allocate a RX buffer with the size of two HSI packet descriptors and
	 * the necessary room for CAIF payload frames.
	 */
	cfhsi->rx_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
	if (!cfhsi->rx_buf) {
		res = -ENODEV;
		goto err_alloc_rx;
	}

	/* Pre-calculate inactivity timeout. */
	if (inactivity_timeout != -1) {
		cfhsi->inactivity_timeout =
				inactivity_timeout * HZ / 1000;
		if (!cfhsi->inactivity_timeout)
			cfhsi->inactivity_timeout = 1;
		else if (cfhsi->inactivity_timeout > NEXT_TIMER_MAX_DELTA)
			cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
	} else {
		cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
	}
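
	/*
	 * Illustrative example: with the default of 1000 ms this yields HZ
	 * jiffies; a module parameter of -1 effectively disables the
	 * timeout via NEXT_TIMER_MAX_DELTA.
	 */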

	/* Initialize receive variables. */
	cfhsi->rx_ptr = cfhsi->rx_buf;
	cfhsi->rx_len = CFHSI_DESC_SZ;

	/* Initialize spin locks. */
	spin_lock_init(&cfhsi->lock);

	/* Set up the driver. */
	cfhsi->drv.tx_done_cb = cfhsi_tx_done_cb;
	cfhsi->drv.rx_done_cb = cfhsi_rx_done_cb;
	cfhsi->drv.wake_up_cb = cfhsi_wake_up_cb;
	cfhsi->drv.wake_down_cb = cfhsi_wake_down_cb;

	/* Initialize the work queues. */
	INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
	INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down);

	/* Clear all bit fields. */
	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
	clear_bit(CFHSI_AWAKE, &cfhsi->bits);

	/* Create work thread. */
	cfhsi->wq = create_singlethread_workqueue(pdev->name);
	if (!cfhsi->wq) {
		dev_err(&ndev->dev, "%s: Failed to create work queue.\n",
			__func__);
		res = -ENODEV;
		goto err_create_wq;
	}

	/* Initialize wait queues. */
	init_waitqueue_head(&cfhsi->wake_up_wait);
	init_waitqueue_head(&cfhsi->wake_down_wait);
	init_waitqueue_head(&cfhsi->flush_fifo_wait);

	/* Setup the inactivity timer. */
	init_timer(&cfhsi->timer);
	cfhsi->timer.data = (unsigned long)cfhsi;
	cfhsi->timer.function = cfhsi_inactivity_tout;
	/* Setup the slowpath RX timer. */
	init_timer(&cfhsi->rx_slowpath_timer);
	cfhsi->rx_slowpath_timer.data = (unsigned long)cfhsi;
	cfhsi->rx_slowpath_timer.function = cfhsi_rx_slowpath;

	/* Add CAIF HSI device to list. */
	spin_lock(&cfhsi_list_lock);
	list_add_tail(&cfhsi->list, &cfhsi_list);
	spin_unlock(&cfhsi_list_lock);

	/* Activate HSI interface. */
	res = cfhsi->dev->cfhsi_up(cfhsi->dev);
	if (res) {
		dev_err(&cfhsi->ndev->dev,
			"%s: can't activate HSI interface: %d.\n",
			__func__, res);
		goto err_activate;
	}

	/* Flush FIFO. */
	res = cfhsi_flush_fifo(cfhsi);
	if (res) {
		dev_err(&ndev->dev, "%s: Can't flush FIFO: %d.\n",
			__func__, res);
		goto err_net_reg;
	}

	/* Register network device. */
	res = register_netdev(ndev);
	if (res) {
		dev_err(&ndev->dev, "%s: Registration error: %d.\n",
			__func__, res);
		goto err_net_reg;
	}

	netif_stop_queue(ndev);

	return res;

 err_net_reg:
	cfhsi->dev->cfhsi_down(cfhsi->dev);
 err_activate:
	destroy_workqueue(cfhsi->wq);
 err_create_wq:
	kfree(cfhsi->rx_buf);
 err_alloc_rx:
	kfree(cfhsi->tx_buf);
 err_alloc_tx:
	free_netdev(ndev);

	return res;
}

static void cfhsi_shutdown(struct cfhsi *cfhsi)
{
	u8 *tx_buf, *rx_buf;

	/* Stop TXing */
	netif_tx_stop_all_queues(cfhsi->ndev);

	/* going to shutdown driver */
	set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);

	/* Flush workqueue */
	flush_workqueue(cfhsi->wq);

	/* Delete timers if pending */
	del_timer_sync(&cfhsi->timer);
	del_timer_sync(&cfhsi->rx_slowpath_timer);

	/* Cancel pending RX request (if any) */
	cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);

	/* Destroy workqueue */
	destroy_workqueue(cfhsi->wq);

	/* Store buffers; they will be freed later. */
	tx_buf = cfhsi->tx_buf;
	rx_buf = cfhsi->rx_buf;

	/* Flush transmit queues. */
	cfhsi_abort_tx(cfhsi);

	/* Deactivate interface */
	cfhsi->dev->cfhsi_down(cfhsi->dev);

	/* Finally unregister the network device. */
	unregister_netdev(cfhsi->ndev);

	/* Free buffers. */
	kfree(tx_buf);
	kfree(rx_buf);
}

int cfhsi_remove(struct platform_device *pdev)
{
	struct list_head *list_node;
	struct list_head *n;
	struct cfhsi *cfhsi = NULL;
	struct cfhsi_dev *dev;

	dev = (struct cfhsi_dev *)pdev->dev.platform_data;
	spin_lock(&cfhsi_list_lock);
	list_for_each_safe(list_node, n, &cfhsi_list) {
		cfhsi = list_entry(list_node, struct cfhsi, list);
		/* Find the corresponding device. */
		if (cfhsi->dev == dev) {
			/* Remove from list. */
			list_del(list_node);
			spin_unlock(&cfhsi_list_lock);

			/* Shutdown driver. */
			cfhsi_shutdown(cfhsi);

			return 0;
		}
	}
	spin_unlock(&cfhsi_list_lock);
	return -ENODEV;
}

struct platform_driver cfhsi_plat_drv = {
	.probe = cfhsi_probe,
	.remove = cfhsi_remove,
	.driver = {
		.name = "cfhsi",
		.owner = THIS_MODULE,
	},
};

static void __exit cfhsi_exit_module(void)
{
	struct list_head *list_node;
	struct list_head *n;
	struct cfhsi *cfhsi = NULL;

	spin_lock(&cfhsi_list_lock);
	list_for_each_safe(list_node, n, &cfhsi_list) {
		cfhsi = list_entry(list_node, struct cfhsi, list);

		/* Remove from list. */
		list_del(list_node);
		spin_unlock(&cfhsi_list_lock);

		/* Shutdown driver. */
		cfhsi_shutdown(cfhsi);

		spin_lock(&cfhsi_list_lock);
	}
	spin_unlock(&cfhsi_list_lock);

	/* Unregister platform driver. */
	platform_driver_unregister(&cfhsi_plat_drv);
}

static int __init cfhsi_init_module(void)
{
	int result;

	/* Initialize spin lock. */
	spin_lock_init(&cfhsi_list_lock);

	/* Register platform driver. */
	result = platform_driver_register(&cfhsi_plat_drv);
	if (result) {
		printk(KERN_ERR "Could not register platform HSI driver: %d.\n",
			result);
		goto err_dev_register;
	}

	return result;

 err_dev_register:
	return result;
}

module_init(cfhsi_init_module);
module_exit(cfhsi_exit_module);