/*
 * Copyright (C) ST-Ericsson AB 2010
 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
 * Author:  Daniel Martensson / daniel.martensson@stericsson.com
 *	    Dmitry.Tarnyagin  / dmitry.tarnyagin@stericsson.com
 * License terms: GNU General Public License (GPL) version 2.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/if_arp.h>
#include <linux/timer.h>
#include <linux/rtnetlink.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_hsi.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>");
MODULE_DESCRIPTION("CAIF HSI driver");

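/*
 * On-the-wire HSI frame layout, as assembled by cfhsi_tx_frm() below (a
 * sketch derived from this file; the authoritative structure definitions
 * live in <net/caif/caif_hsi.h>):
 *
 *	descriptor:	header | offset | cffrm_len[CFHSI_MAX_PKTS] | emb_frm
 *	payload:	per frame: head pad | CAIF frame | tail pad
 *	[piggy-backed descriptor, if CFHSI_PIGGY_DESC is set in the header]
 *
 * A small CAIF frame may instead be embedded in emb_frm, with desc->offset
 * pointing at it. The first byte of each head-padding area stores the
 * number of remaining padding bytes.
 */
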
/* Returns the number of padding bytes for alignment. */
#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
				(((pow)-((x)&((pow)-1)))))
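
/*
 * For example, PAD_POW2(5, 4) == 3 and PAD_POW2(8, 4) == 0: the result pads
 * x up to the next multiple of pow. This only works when pow is a power of
 * two, since (pow - 1) is then usable as a bit mask.
 */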

static int inactivity_timeout = 1000;
module_param(inactivity_timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(inactivity_timeout, "Inactivity timeout on HSI, ms.");

/*
 * HSI padding options.
 * Warning: must be a power of 2 (an & operation is used) and cannot be zero!
 */
static int hsi_head_align = 4;
module_param(hsi_head_align, int, S_IRUGO);
MODULE_PARM_DESC(hsi_head_align, "HSI head alignment.");

static int hsi_tail_align = 4;
module_param(hsi_tail_align, int, S_IRUGO);
MODULE_PARM_DESC(hsi_tail_align, "HSI tail alignment.");

/*
 * HSI link-layer flow-control thresholds.
 * Warning: A high threshold value might increase throughput, but it will at
 * the same time prevent channel prioritization and increase the risk of
 * flooding the modem. The high threshold should be above the low one.
 */
static int hsi_high_threshold = 100;
module_param(hsi_high_threshold, int, S_IRUGO);
MODULE_PARM_DESC(hsi_high_threshold, "HSI high threshold (FLOW OFF).");

static int hsi_low_threshold = 50;
module_param(hsi_low_threshold, int, S_IRUGO);
MODULE_PARM_DESC(hsi_low_threshold, "HSI low threshold (FLOW ON).");
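
/*
 * With the defaults above, flow control is asserted (FLOW OFF) once more
 * than 100 packets are queued and de-asserted (FLOW ON) when the queue has
 * drained to 50 packets or fewer; see cfhsi_xmit() and cfhsi_tx_done().
 */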

#define ON 1
#define OFF 0

/*
 * Threshold values for the HSI packet queue. Flow control will be asserted
 * when the number of packets exceeds HIGH_WATER_MARK. It will not be
 * de-asserted before the number of packets drops below LOW_WATER_MARK.
 */
#define LOW_WATER_MARK hsi_low_threshold
#define HIGH_WATER_MARK hsi_high_threshold

static LIST_HEAD(cfhsi_list);
static spinlock_t cfhsi_list_lock;

static void cfhsi_inactivity_tout(unsigned long arg)
{
	struct cfhsi *cfhsi = (struct cfhsi *)arg;

	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	/* Schedule power down work queue. */
	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		queue_work(cfhsi->wq, &cfhsi->wake_down_work);
}

static void cfhsi_abort_tx(struct cfhsi *cfhsi)
{
	struct sk_buff *skb;

	for (;;) {
		spin_lock_bh(&cfhsi->lock);
		skb = skb_dequeue(&cfhsi->qhead);
		if (!skb)
			break;

		cfhsi->ndev->stats.tx_errors++;
		cfhsi->ndev->stats.tx_dropped++;
		spin_unlock_bh(&cfhsi->lock);
		kfree_skb(skb);
	}
	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		mod_timer(&cfhsi->timer,
			  jiffies + cfhsi->inactivity_timeout);
	spin_unlock_bh(&cfhsi->lock);
}

static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
{
	char buffer[32]; /* Any reasonable value */
	size_t fifo_occupancy;
	int ret;

	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	ret = cfhsi->dev->cfhsi_wake_up(cfhsi->dev);
	if (ret) {
		dev_warn(&cfhsi->ndev->dev,
			"%s: can't wake up HSI interface: %d.\n",
			__func__, ret);
		return ret;
	}

	do {
		ret = cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
				&fifo_occupancy);
		if (ret) {
			dev_warn(&cfhsi->ndev->dev,
				"%s: can't get FIFO occupancy: %d.\n",
				__func__, ret);
			break;
		} else if (!fifo_occupancy)
			/* No more data, exiting normally */
			break;

		fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
		set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
		ret = cfhsi->dev->cfhsi_rx(buffer, fifo_occupancy,
				cfhsi->dev);
		if (ret) {
			clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
			dev_warn(&cfhsi->ndev->dev,
				"%s: can't read data: %d.\n",
				__func__, ret);
			break;
		}

		ret = 5 * HZ;
		ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
			 !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);

		if (ret < 0) {
			dev_warn(&cfhsi->ndev->dev,
				"%s: can't wait for flush complete: %d.\n",
				__func__, ret);
			break;
		} else if (!ret) {
			ret = -ETIMEDOUT;
			dev_warn(&cfhsi->ndev->dev,
				"%s: timeout waiting for flush complete.\n",
				__func__);
			break;
		}
	} while (1);

	cfhsi->dev->cfhsi_wake_down(cfhsi->dev);

	return ret;
}

static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int nfrms = 0;
	int pld_len = 0;
	struct sk_buff *skb;
	u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;

	skb = skb_dequeue(&cfhsi->qhead);
	if (!skb)
		return 0;

	/* Clear offset. */
	desc->offset = 0;

	/* Check if we can embed a CAIF frame. */
	if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
		struct caif_payload_info *info;
		int hpad = 0;
		int tpad = 0;

		/* Calculate needed head alignment and tail alignment. */
		info = (struct caif_payload_info *)&skb->cb;

		hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
		tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
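
		/*
		 * Illustration: with hsi_head_align == 4 and hdr_len == 2,
		 * hpad = 1 + PAD_POW2(3, 4) = 2. The leading "+ 1" reserves
		 * the byte that stores the head-padding length (hpad - 1)
		 * in front of the frame, and tpad pads the total out to the
		 * configured tail alignment.
		 */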

		/* Check if frame still fits with added alignment. */
		if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
			u8 *pemb = desc->emb_frm;
			desc->offset = CFHSI_DESC_SHORT_SZ;
			*pemb = (u8)(hpad - 1);
			pemb += hpad;

			/* Update network statistics. */
			cfhsi->ndev->stats.tx_packets++;
			cfhsi->ndev->stats.tx_bytes += skb->len;

			/* Copy in embedded CAIF frame. */
			skb_copy_bits(skb, 0, pemb, skb->len);
			consume_skb(skb);
			skb = NULL;
		}
	}

	/* Create payload CAIF frames. */
	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
	while (nfrms < CFHSI_MAX_PKTS) {
		struct caif_payload_info *info;
		int hpad = 0;
		int tpad = 0;

		if (!skb)
			skb = skb_dequeue(&cfhsi->qhead);

		if (!skb)
			break;

		/* Calculate needed head alignment and tail alignment. */
		info = (struct caif_payload_info *)&skb->cb;

		hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
		tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);

		/* Fill in CAIF frame length in descriptor. */
		desc->cffrm_len[nfrms] = hpad + skb->len + tpad;

		/* Fill head padding information. */
		*pfrm = (u8)(hpad - 1);
		pfrm += hpad;

		/* Update network statistics. */
		cfhsi->ndev->stats.tx_packets++;
		cfhsi->ndev->stats.tx_bytes += skb->len;

		/* Copy in CAIF frame. */
		skb_copy_bits(skb, 0, pfrm, skb->len);

		/* Update payload length. */
		pld_len += desc->cffrm_len[nfrms];

		/* Update frame pointer. */
		pfrm += skb->len + tpad;
		consume_skb(skb);
		skb = NULL;

		/* Update number of frames. */
		nfrms++;
	}

	/* Unused length fields should be zero-filled (according to SPEC). */
	while (nfrms < CFHSI_MAX_PKTS) {
		desc->cffrm_len[nfrms] = 0x0000;
		nfrms++;
	}

	/* Check if we can piggy-back another descriptor. */
	skb = skb_peek(&cfhsi->qhead);
	if (skb)
		desc->header |= CFHSI_PIGGY_DESC;
	else
		desc->header &= ~CFHSI_PIGGY_DESC;

	return CFHSI_DESC_SZ + pld_len;
}

static void cfhsi_tx_done(struct cfhsi *cfhsi)
{
	struct cfhsi_desc *desc = NULL;
	int len = 0;
	int res;

	dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	desc = (struct cfhsi_desc *)cfhsi->tx_buf;

	do {
		/*
		 * Send flow on if flow off has been previously signalled
		 * and number of packets is below low water mark.
		 */
		spin_lock_bh(&cfhsi->lock);
		if (cfhsi->flow_off_sent &&
			cfhsi->qhead.qlen <= cfhsi->q_low_mark &&
			cfhsi->cfdev.flowctrl) {

			cfhsi->flow_off_sent = 0;
			cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
		}
		spin_unlock_bh(&cfhsi->lock);

		/* Create HSI frame. */
		do {
			len = cfhsi_tx_frm(desc, cfhsi);
			if (!len) {
				spin_lock_bh(&cfhsi->lock);
				if (unlikely(skb_peek(&cfhsi->qhead))) {
					spin_unlock_bh(&cfhsi->lock);
					continue;
				}
				cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
				/* Start inactivity timer. */
				mod_timer(&cfhsi->timer,
					jiffies + cfhsi->inactivity_timeout);
				spin_unlock_bh(&cfhsi->lock);
				goto done;
			}
		} while (!len);

		/* Set up new transfer. */
		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
				__func__, res);
		}
	} while (res < 0);

done:
	return;
}

static void cfhsi_tx_done_cb(struct cfhsi_drv *drv)
{
	struct cfhsi *cfhsi;

	cfhsi = container_of(drv, struct cfhsi, drv);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;
	cfhsi_tx_done(cfhsi);
}

static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int xfer_sz = 0;
	int nfrms = 0;
	u16 *plen = NULL;
	u8 *pfrm = NULL;

	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
		dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
			__func__);
		return -EPROTO;
	}

	/* Check for embedded CAIF frame. */
	if (desc->offset) {
		struct sk_buff *skb;
		u8 *dst = NULL;
		int len = 0;
		pfrm = ((u8 *)desc) + desc->offset;

		/* Remove offset padding. */
		pfrm += *pfrm + 1;

		/* Read length of CAIF frame (little endian). */
		len = *pfrm;
		len |= ((*(pfrm+1)) << 8) & 0xFF00;
		len += 2;	/* Add FCS fields. */
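
		/*
		 * E.g. the two length bytes 0x34 0x12 yield a frame length
		 * of 0x1234; adding 2 bytes for the FCS gives len == 0x1236.
		 */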

		/* Sanity check length of CAIF frame. */
		if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
			dev_err(&cfhsi->ndev->dev, "%s: Invalid length.\n",
				__func__);
			return -EPROTO;
		}

		/* Allocate SKB (OK even in IRQ context). */
		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			dev_err(&cfhsi->ndev->dev, "%s: Out of memory !\n",
				__func__);
			return -ENOMEM;
		}
		caif_assert(skb != NULL);

		dst = skb_put(skb, len);
		memcpy(dst, pfrm, len);

		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfhsi->ndev;

		/*
		 * We are called from an arch-specific platform device.
		 * Unfortunately we don't know what context we're
		 * running in.
		 */
		if (in_interrupt())
			netif_rx(skb);
		else
			netif_rx_ni(skb);

		/* Update network statistics. */
		cfhsi->ndev->stats.rx_packets++;
		cfhsi->ndev->stats.rx_bytes += len;
	}

	/* Calculate transfer length. */
	plen = desc->cffrm_len;
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		xfer_sz += *plen;
		plen++;
		nfrms++;
	}

	/* Check for piggy-backed descriptor. */
	if (desc->header & CFHSI_PIGGY_DESC)
		xfer_sz += CFHSI_DESC_SZ;

	if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) {
		dev_err(&cfhsi->ndev->dev,
				"%s: Invalid payload len: %d, ignored.\n",
			__func__, xfer_sz);
		return -EPROTO;
	}
	return xfer_sz;
}

static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int rx_sz = 0;
	int nfrms = 0;
	u16 *plen = NULL;
	u8 *pfrm = NULL;

	/* Sanity check header and offset. */
	if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
		dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
			__func__);
		return -EPROTO;
	}

	/* Set frame pointer to start of payload. */
	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
	plen = desc->cffrm_len;

	/* Skip already processed frames. */
	while (nfrms < cfhsi->rx_state.nfrms) {
		pfrm += *plen;
		rx_sz += *plen;
		plen++;
		nfrms++;
	}

	/* Parse payload. */
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		struct sk_buff *skb;
		u8 *dst = NULL;
		u8 *pcffrm = NULL;
		int len = 0;

		/* CAIF frame starts after head padding. */
		pcffrm = pfrm + *pfrm + 1;

		/* Read length of CAIF frame (little endian). */
		len = *pcffrm;
		len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
		len += 2;	/* Add FCS fields. */

		/* Sanity check length of CAIF frame. */
		if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
			dev_err(&cfhsi->ndev->dev, "%s: Invalid length.\n",
				__func__);
			return -EPROTO;
		}

		/* Allocate SKB (OK even in IRQ context). */
		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			dev_err(&cfhsi->ndev->dev, "%s: Out of memory !\n",
				__func__);
			cfhsi->rx_state.nfrms = nfrms;
			return -ENOMEM;
		}
		caif_assert(skb != NULL);

		dst = skb_put(skb, len);
		memcpy(dst, pcffrm, len);

		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfhsi->ndev;

		/*
		 * We're called from a platform device,
		 * and don't know the context we're running in.
		 */
		if (in_interrupt())
			netif_rx(skb);
		else
			netif_rx_ni(skb);

		/* Update network statistics. */
		cfhsi->ndev->stats.rx_packets++;
		cfhsi->ndev->stats.rx_bytes += len;

		pfrm += *plen;
		rx_sz += *plen;
		plen++;
		nfrms++;
	}

	return rx_sz;
}

static void cfhsi_rx_done(struct cfhsi *cfhsi)
{
	int res;
	int desc_pld_len = 0;
	struct cfhsi_desc *desc = NULL;

	desc = (struct cfhsi_desc *)cfhsi->rx_buf;

	dev_dbg(&cfhsi->ndev->dev, "%s\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Update inactivity timer if pending. */
	spin_lock_bh(&cfhsi->lock);
	mod_timer_pending(&cfhsi->timer,
			jiffies + cfhsi->inactivity_timeout);
	spin_unlock_bh(&cfhsi->lock);

	if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
		desc_pld_len = cfhsi_rx_desc(desc, cfhsi);
		if (desc_pld_len == -ENOMEM)
			goto restart;
		if (desc_pld_len == -EPROTO)
			goto out_of_sync;
	} else {
		int pld_len;

		if (!cfhsi->rx_state.piggy_desc) {
			pld_len = cfhsi_rx_pld(desc, cfhsi);
			if (pld_len == -ENOMEM)
				goto restart;
			if (pld_len == -EPROTO)
				goto out_of_sync;
			cfhsi->rx_state.pld_len = pld_len;
		} else {
			pld_len = cfhsi->rx_state.pld_len;
		}

		if ((pld_len > 0) && (desc->header & CFHSI_PIGGY_DESC)) {
			struct cfhsi_desc *piggy_desc;
			piggy_desc = (struct cfhsi_desc *)
				(desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
						pld_len);
			cfhsi->rx_state.piggy_desc = true;

			/* Extract piggy-backed descriptor. */
			desc_pld_len = cfhsi_rx_desc(piggy_desc, cfhsi);
			if (desc_pld_len == -ENOMEM)
				goto restart;

			/*
			 * Copy needed information from the piggy-backed
			 * descriptor to the descriptor at the start.
			 */
			memcpy((u8 *)desc, (u8 *)piggy_desc,
					CFHSI_DESC_SHORT_SZ);

			if (desc_pld_len == -EPROTO)
				goto out_of_sync;
		}
	}

	memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state));
	if (desc_pld_len) {
		cfhsi->rx_state.state = CFHSI_RX_STATE_PAYLOAD;
		cfhsi->rx_ptr = cfhsi->rx_buf + CFHSI_DESC_SZ;
		cfhsi->rx_len = desc_pld_len;
	} else {
		cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;
		cfhsi->rx_ptr = cfhsi->rx_buf;
		cfhsi->rx_len = CFHSI_DESC_SZ;
	}

	if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
		/* Set up new transfer. */
		dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n",
			__func__);
		res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len,
				cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: RX error %d.\n",
				__func__, res);
			cfhsi->ndev->stats.rx_errors++;
			cfhsi->ndev->stats.rx_dropped++;
		}
	}
	return;

restart:
	if (++cfhsi->rx_state.retries > CFHSI_MAX_RX_RETRIES) {
		dev_err(&cfhsi->ndev->dev, "%s: No memory available "
			"in %d iterations.\n",
			__func__, CFHSI_MAX_RX_RETRIES);
		BUG();
	}
	mod_timer(&cfhsi->rx_slowpath_timer, jiffies + 1);
	return;

out_of_sync:
	dev_err(&cfhsi->ndev->dev, "%s: Out of sync.\n", __func__);
	print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE,
			cfhsi->rx_buf, CFHSI_DESC_SZ);
	schedule_work(&cfhsi->out_of_sync_work);
}

static void cfhsi_rx_slowpath(unsigned long arg)
{
	struct cfhsi *cfhsi = (struct cfhsi *)arg;

	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	cfhsi_rx_done(cfhsi);
}

static void cfhsi_rx_done_cb(struct cfhsi_drv *drv)
{
	struct cfhsi *cfhsi;

	cfhsi = container_of(drv, struct cfhsi, drv);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits))
		wake_up_interruptible(&cfhsi->flush_fifo_wait);
	else
		cfhsi_rx_done(cfhsi);
}
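
/*
 * Power-state handshake, as implemented by the work items below (summary
 * derived from this file): to wake, the initiating side activates its wake
 * line and waits up to CFHSI_WAKE_TOUT for the peer's WAKE_UP_ACK; to sleep,
 * the host deactivates the wake line, waits for WAKE_DOWN_ACK, and then
 * polls the FIFO until it is empty before clearing CFHSI_AWAKE.
 */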

static void cfhsi_wake_up(struct work_struct *work)
{
	struct cfhsi *cfhsi = NULL;
	int res;
	int len;
	long ret;

	cfhsi = container_of(work, struct cfhsi, wake_up_work);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) {
		/* This happens when wakeup is requested by
		 * both ends at the same time. */
		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		return;
	}

	/* Activate wake line. */
	cfhsi->dev->cfhsi_wake_up(cfhsi->dev);

	dev_dbg(&cfhsi->ndev->dev, "%s: Start waiting.\n",
		__func__);

	/* Wait for acknowledge. */
	ret = CFHSI_WAKE_TOUT;
	ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait,
					test_and_clear_bit(CFHSI_WAKE_UP_ACK,
							&cfhsi->bits), ret);
	if (unlikely(ret < 0)) {
		/* Interrupted by signal. */
		dev_info(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
			__func__, ret);
		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
		return;
	} else if (!ret) {
		/* Wakeup timeout */
		dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n",
			__func__);
		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
		return;
	}
	dev_dbg(&cfhsi->ndev->dev, "%s: Woken.\n",
		__func__);

	/* Clear power up bit. */
	set_bit(CFHSI_AWAKE, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);

	/* Resume read operation. */
	dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n", __func__);
	res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->dev);

	if (WARN_ON(res < 0))
		dev_err(&cfhsi->ndev->dev, "%s: RX err %d.\n", __func__, res);

	/* Clear power up acknowledgment. */
	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);

	spin_lock_bh(&cfhsi->lock);

	/* Resume transmit if queue is not empty. */
	if (!skb_peek(&cfhsi->qhead)) {
		dev_dbg(&cfhsi->ndev->dev, "%s: Peer wake, start timer.\n",
			__func__);
		/* Start inactivity timer. */
		mod_timer(&cfhsi->timer,
				jiffies + cfhsi->inactivity_timeout);
		spin_unlock_bh(&cfhsi->lock);
		return;
	}

	dev_dbg(&cfhsi->ndev->dev, "%s: Host wake.\n",
		__func__);

	spin_unlock_bh(&cfhsi->lock);

	/* Create HSI frame. */
	len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi);

	if (likely(len > 0)) {
		/* Set up new transfer. */
		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
				__func__, res);
			cfhsi_abort_tx(cfhsi);
		}
	} else {
		dev_err(&cfhsi->ndev->dev,
			"%s: Failed to create HSI frame: %d.\n",
			__func__, len);
	}
}

static void cfhsi_wake_down(struct work_struct *work)
{
	long ret;
	struct cfhsi *cfhsi = NULL;
	size_t fifo_occupancy = 0;
	int retry = CFHSI_WAKE_TOUT;

	cfhsi = container_of(work, struct cfhsi, wake_down_work);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Deactivate wake line. */
	cfhsi->dev->cfhsi_wake_down(cfhsi->dev);

	/* Wait for acknowledge. */
	ret = CFHSI_WAKE_TOUT;
	ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait,
					test_and_clear_bit(CFHSI_WAKE_DOWN_ACK,
							&cfhsi->bits), ret);
	if (ret < 0) {
		/* Interrupted by signal. */
		dev_info(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
			__func__, ret);
		return;
	} else if (!ret) {
		/* Timeout */
		dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n", __func__);
	}

	/* Check FIFO occupancy. */
	while (retry) {
		WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
							&fifo_occupancy));

		if (!fifo_occupancy)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(1);
		retry--;
	}

	if (!retry)
		dev_err(&cfhsi->ndev->dev, "%s: FIFO Timeout.\n", __func__);

	/* Clear AWAKE condition. */
	clear_bit(CFHSI_AWAKE, &cfhsi->bits);

	/* Cancel pending RX requests. */
	cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
}

static void cfhsi_out_of_sync(struct work_struct *work)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(work, struct cfhsi, out_of_sync_work);

	rtnl_lock();
	dev_close(cfhsi->ndev);
	rtnl_unlock();
}

static void cfhsi_wake_up_cb(struct cfhsi_drv *drv)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(drv, struct cfhsi, drv);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
	wake_up_interruptible(&cfhsi->wake_up_wait);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Schedule wake up work queue if the peer initiates. */
	if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
		queue_work(cfhsi->wq, &cfhsi->wake_up_work);
}

static void cfhsi_wake_down_cb(struct cfhsi_drv *drv)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(drv, struct cfhsi, drv);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	/* Initiating low power is only permitted by the host (us). */
	set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
	wake_up_interruptible(&cfhsi->wake_down_wait);
}

static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct cfhsi *cfhsi = NULL;
	int start_xfer = 0;
	int timer_active;

	if (!dev)
		return -EINVAL;

	cfhsi = netdev_priv(dev);

	spin_lock_bh(&cfhsi->lock);

	skb_queue_tail(&cfhsi->qhead, skb);

	/* Sanity check; xmit should not be called after unregister_netdev */
	if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
		spin_unlock_bh(&cfhsi->lock);
		cfhsi_abort_tx(cfhsi);
		return -EINVAL;
	}

	/* Send flow off if number of packets is above high water mark. */
	if (!cfhsi->flow_off_sent &&
		cfhsi->qhead.qlen > cfhsi->q_high_mark &&
		cfhsi->cfdev.flowctrl) {
		cfhsi->flow_off_sent = 1;
		cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
	}

	if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) {
		cfhsi->tx_state = CFHSI_TX_STATE_XFER;
		start_xfer = 1;
	}

	if (!start_xfer) {
		spin_unlock_bh(&cfhsi->lock);
		return 0;
	}

	/* Delete inactivity timer if started. */
	timer_active = del_timer_sync(&cfhsi->timer);

	spin_unlock_bh(&cfhsi->lock);

	if (timer_active) {
		struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
		int len;
		int res;

		/* Create HSI frame. */
		len = cfhsi_tx_frm(desc, cfhsi);
		BUG_ON(!len);

		/* Set up new transfer. */
		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
				__func__, res);
			cfhsi_abort_tx(cfhsi);
		}
	} else {
		/* Schedule wake up work queue if we initiate. */
		if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
			queue_work(cfhsi->wq, &cfhsi->wake_up_work);
	}

	return 0;
}

static int cfhsi_open(struct net_device *dev)
{
	netif_wake_queue(dev);

	return 0;
}

static int cfhsi_close(struct net_device *dev)
{
	netif_stop_queue(dev);

	return 0;
}

static const struct net_device_ops cfhsi_ops = {
	.ndo_open = cfhsi_open,
	.ndo_stop = cfhsi_close,
	.ndo_start_xmit = cfhsi_xmit
};

static void cfhsi_setup(struct net_device *dev)
{
	struct cfhsi *cfhsi = netdev_priv(dev);
	dev->features = 0;
	dev->netdev_ops = &cfhsi_ops;
	dev->type = ARPHRD_CAIF;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
	dev->mtu = CFHSI_MAX_PAYLOAD_SZ;
	dev->tx_queue_len = 0;
	dev->destructor = free_netdev;
	skb_queue_head_init(&cfhsi->qhead);
	cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
	cfhsi->cfdev.use_frag = false;
	cfhsi->cfdev.use_stx = false;
	cfhsi->cfdev.use_fcs = false;
	cfhsi->ndev = dev;
}

int cfhsi_probe(struct platform_device *pdev)
{
	struct cfhsi *cfhsi = NULL;
	struct net_device *ndev;
	struct cfhsi_dev *dev;
	int res;

	ndev = alloc_netdev(sizeof(struct cfhsi), "cfhsi%d", cfhsi_setup);
	if (!ndev)
		return -ENODEV;

	cfhsi = netdev_priv(ndev);
	cfhsi->ndev = ndev;
	cfhsi->pdev = pdev;

	/* Initialize state variables. */
	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
	cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;

	/* Set flow info. */
	cfhsi->flow_off_sent = 0;
	cfhsi->q_low_mark = LOW_WATER_MARK;
	cfhsi->q_high_mark = HIGH_WATER_MARK;

	/* Assign the HSI device. */
	dev = (struct cfhsi_dev *)pdev->dev.platform_data;
	cfhsi->dev = dev;

	/* Assign the driver to this HSI device. */
	dev->drv = &cfhsi->drv;

	/*
	 * Allocate a TX buffer with the size of an HSI packet descriptor
	 * and the necessary room for CAIF payload frames.
	 */
	cfhsi->tx_buf = kzalloc(CFHSI_BUF_SZ_TX, GFP_KERNEL);
	if (!cfhsi->tx_buf) {
		res = -ENODEV;
		goto err_alloc_tx;
	}

	/*
	 * Allocate a RX buffer with the size of two HSI packet descriptors and
	 * the necessary room for CAIF payload frames.
	 */
	cfhsi->rx_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
	if (!cfhsi->rx_buf) {
		res = -ENODEV;
		goto err_alloc_rx;
	}

	/* Pre-calculate inactivity timeout. */
	if (inactivity_timeout != -1) {
		cfhsi->inactivity_timeout =
				inactivity_timeout * HZ / 1000;
		if (!cfhsi->inactivity_timeout)
			cfhsi->inactivity_timeout = 1;
		else if (cfhsi->inactivity_timeout > NEXT_TIMER_MAX_DELTA)
			cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
	} else {
		cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
	}
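
	/*
	 * For example, with the default inactivity_timeout of 1000 ms and
	 * HZ == 100, this yields an inactivity timeout of 100 jiffies.
	 * Passing -1 effectively disables the timeout by clamping it to
	 * NEXT_TIMER_MAX_DELTA.
	 */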

	/* Initialize receive variables. */
	cfhsi->rx_ptr = cfhsi->rx_buf;
	cfhsi->rx_len = CFHSI_DESC_SZ;

	/* Initialize spin locks. */
	spin_lock_init(&cfhsi->lock);

	/* Set up the driver. */
	cfhsi->drv.tx_done_cb = cfhsi_tx_done_cb;
	cfhsi->drv.rx_done_cb = cfhsi_rx_done_cb;
	cfhsi->drv.wake_up_cb = cfhsi_wake_up_cb;
	cfhsi->drv.wake_down_cb = cfhsi_wake_down_cb;

	/* Initialize the work queues. */
	INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
	INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down);
	INIT_WORK(&cfhsi->out_of_sync_work, cfhsi_out_of_sync);

	/* Clear all bit fields. */
	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
	clear_bit(CFHSI_AWAKE, &cfhsi->bits);

	/* Create work thread. */
	cfhsi->wq = create_singlethread_workqueue(pdev->name);
	if (!cfhsi->wq) {
		dev_err(&ndev->dev, "%s: Failed to create work queue.\n",
			__func__);
		res = -ENODEV;
		goto err_create_wq;
	}

	/* Initialize wait queues. */
	init_waitqueue_head(&cfhsi->wake_up_wait);
	init_waitqueue_head(&cfhsi->wake_down_wait);
	init_waitqueue_head(&cfhsi->flush_fifo_wait);

	/* Set up the inactivity timer. */
	init_timer(&cfhsi->timer);
	cfhsi->timer.data = (unsigned long)cfhsi;
	cfhsi->timer.function = cfhsi_inactivity_tout;
	/* Set up the slowpath RX timer. */
	init_timer(&cfhsi->rx_slowpath_timer);
	cfhsi->rx_slowpath_timer.data = (unsigned long)cfhsi;
	cfhsi->rx_slowpath_timer.function = cfhsi_rx_slowpath;

	/* Add CAIF HSI device to list. */
	spin_lock(&cfhsi_list_lock);
	list_add_tail(&cfhsi->list, &cfhsi_list);
	spin_unlock(&cfhsi_list_lock);

	/* Activate HSI interface. */
	res = cfhsi->dev->cfhsi_up(cfhsi->dev);
	if (res) {
		dev_err(&cfhsi->ndev->dev,
			"%s: can't activate HSI interface: %d.\n",
			__func__, res);
		goto err_activate;
	}

	/* Flush FIFO. */
	res = cfhsi_flush_fifo(cfhsi);
	if (res) {
		dev_err(&ndev->dev, "%s: Can't flush FIFO: %d.\n",
			__func__, res);
		goto err_net_reg;
	}

	/* Register network device. */
	res = register_netdev(ndev);
	if (res) {
		dev_err(&ndev->dev, "%s: Registration error: %d.\n",
			__func__, res);
		goto err_net_reg;
	}

	netif_stop_queue(ndev);

	return res;

 err_net_reg:
	cfhsi->dev->cfhsi_down(cfhsi->dev);
 err_activate:
	destroy_workqueue(cfhsi->wq);
 err_create_wq:
	kfree(cfhsi->rx_buf);
 err_alloc_rx:
	kfree(cfhsi->tx_buf);
 err_alloc_tx:
	free_netdev(ndev);

	return res;
}

static void cfhsi_shutdown(struct cfhsi *cfhsi)
{
	u8 *tx_buf, *rx_buf;

	/* Stop TXing */
	netif_tx_stop_all_queues(cfhsi->ndev);

	/* Going to shut down the driver. */
	set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);

	/* Flush workqueue */
	flush_workqueue(cfhsi->wq);

	/* Delete timers if pending */
	del_timer_sync(&cfhsi->timer);
	del_timer_sync(&cfhsi->rx_slowpath_timer);

	/* Cancel pending RX request (if any) */
	cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);

	/* Destroy workqueue */
	destroy_workqueue(cfhsi->wq);

	/* Store buffers; they will be freed later. */
	tx_buf = cfhsi->tx_buf;
	rx_buf = cfhsi->rx_buf;

	/* Flush transmit queues. */
	cfhsi_abort_tx(cfhsi);

	/* Deactivate interface */
	cfhsi->dev->cfhsi_down(cfhsi->dev);

	/* Finally unregister the network device. */
	unregister_netdev(cfhsi->ndev);

	/* Free buffers. */
	kfree(tx_buf);
	kfree(rx_buf);
}

int cfhsi_remove(struct platform_device *pdev)
{
	struct list_head *list_node;
	struct list_head *n;
	struct cfhsi *cfhsi = NULL;
	struct cfhsi_dev *dev;

	dev = (struct cfhsi_dev *)pdev->dev.platform_data;
	spin_lock(&cfhsi_list_lock);
	list_for_each_safe(list_node, n, &cfhsi_list) {
		cfhsi = list_entry(list_node, struct cfhsi, list);
		/* Find the corresponding device. */
		if (cfhsi->dev == dev) {
			/* Remove from list. */
			list_del(list_node);
			spin_unlock(&cfhsi_list_lock);

			/* Shut down driver. */
			cfhsi_shutdown(cfhsi);

			return 0;
		}
	}
	spin_unlock(&cfhsi_list_lock);
	return -ENODEV;
}

struct platform_driver cfhsi_plat_drv = {
	.probe = cfhsi_probe,
	.remove = cfhsi_remove,
	.driver = {
		.name = "cfhsi",
		.owner = THIS_MODULE,
	},
};

static void __exit cfhsi_exit_module(void)
{
	struct list_head *list_node;
	struct list_head *n;
	struct cfhsi *cfhsi = NULL;

	spin_lock(&cfhsi_list_lock);
	list_for_each_safe(list_node, n, &cfhsi_list) {
		cfhsi = list_entry(list_node, struct cfhsi, list);

		/* Remove from list. */
		list_del(list_node);
		spin_unlock(&cfhsi_list_lock);

		/* Shut down driver. */
		cfhsi_shutdown(cfhsi);

		spin_lock(&cfhsi_list_lock);
	}
	spin_unlock(&cfhsi_list_lock);

	/* Unregister platform driver. */
	platform_driver_unregister(&cfhsi_plat_drv);
}

static int __init cfhsi_init_module(void)
{
	int result;

	/* Initialize spin lock. */
	spin_lock_init(&cfhsi_list_lock);

	/* Register platform driver. */
	result = platform_driver_register(&cfhsi_plat_drv);
	if (result) {
		printk(KERN_ERR "Could not register platform HSI driver: %d.\n",
			result);
		goto err_dev_register;
	}

	return result;

 err_dev_register:
	return result;
}

module_init(cfhsi_init_module);
module_exit(cfhsi_exit_module);