/*
 * Copyright (C) ST-Ericsson AB 2010
 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
 * Authors: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com,
 *          Daniel Martensson / daniel.martensson@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt

#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>

#include <net/caif/caif_device.h>
#include <net/caif/caif_shm.h>

#define NR_TX_BUF 6
#define NR_RX_BUF 6
#define TX_BUF_SZ 0x2000
#define RX_BUF_SZ 0x2000

#define CAIF_NEEDED_HEADROOM 32

#define CAIF_FLOW_ON 1
#define CAIF_FLOW_OFF 0

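/*
 * Flow control watermarks, counted in free TX buffers: transmission is
 * throttled (CAIF_FLOW_OFF) when fewer than LOW_WATERMARK empty TX buffers
 * remain, and resumed (CAIF_FLOW_ON) once more than HIGH_WATERMARK empty
 * TX buffers are available again.
 */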
#define LOW_WATERMARK 3
#define HIGH_WATERMARK 4

/* Maximum number of CAIF buffers per shared memory buffer. */
#define SHM_MAX_FRMS_PER_BUF 10

/*
 * Size in bytes of the descriptor area
 * (With end of descriptor signalling)
 */
#define SHM_CAIF_DESC_SIZE ((SHM_MAX_FRMS_PER_BUF + 1) * \
				sizeof(struct shm_pck_desc))

/*
 * Offset to the first CAIF frame within a shared memory buffer.
 * Aligned on 32 bytes.
 */
#define SHM_CAIF_FRM_OFS (SHM_CAIF_DESC_SIZE + (SHM_CAIF_DESC_SIZE % 32))

/* Number of bytes for CAIF shared memory header. */
#define SHM_HDR_LEN 1

/* Number of padding bytes for the complete CAIF frame. */
#define SHM_FRM_PAD_LEN 4

#define CAIF_MAX_MTU 4096

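/*
 * Layout of the mailbox word exchanged with the peer: the low nibble
 * carries the index of a buffer that has been filled, the high nibble the
 * index of a buffer that has been emptied. The index is stored offset by
 * one so that a nibble value of zero means "no buffer"; SHM_SET_FULL() and
 * SHM_SET_EMPTY() add the offset, the SHM_GET_*() macros remove it again.
 */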
#define SHM_SET_FULL(x)	(((x+1) & 0x0F) << 0)
#define SHM_GET_FULL(x)	(((x >> 0) & 0x0F) - 1)

#define SHM_SET_EMPTY(x) (((x+1) & 0x0F) << 4)
#define SHM_GET_EMPTY(x) (((x >> 4) & 0x0F) - 1)

#define SHM_FULL_MASK (0x0F << 0)
#define SHM_EMPTY_MASK (0x0F << 4)

struct shm_pck_desc {
	/*
	 * Offset from start of shared memory area to start of
	 * shared memory CAIF frame.
	 */
	u32 frm_ofs;
	u32 frm_len;
};

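/*
 * Bookkeeping for one shared memory buffer: desc_vptr is the CPU mapping
 * of the buffer, phy_addr its address within the shared memory area,
 * index the buffer number used in mailbox messages, frames the number of
 * CAIF frames currently stored, and frm_ofs the offset at which the next
 * frame will be placed.
 */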
struct buf_list {
	unsigned char *desc_vptr;
	u32 phy_addr;
	u32 index;
	u32 len;
	u32 frames;
	u32 frm_ofs;
	struct list_head list;
};

struct shm_caif_frm {
	/* Number of bytes of padding before the CAIF frame. */
	u8 hdr_ofs;
};

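/*
 * Buffers circulate between the lists below: TX buffers move from
 * tx_empty_list to tx_full_list when they have been filled and announced
 * to the peer, and back to tx_empty_list once the peer reports them
 * emptied. RX buffers move from rx_empty_list to rx_full_list when the
 * peer reports them filled, to rx_pend_list after their frames have been
 * delivered to the stack, and back to rx_empty_list when the peer has
 * been told they may be reused.
 */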
struct shmdrv_layer {
	/* caif_dev_common must always be first in the structure. */
	struct caif_dev_common cfdev;

	u32 shm_tx_addr;
	u32 shm_rx_addr;
	u32 shm_base_addr;
	u32 tx_empty_available;
	spinlock_t lock;

	struct list_head tx_empty_list;
	struct list_head tx_pend_list;
	struct list_head tx_full_list;
	struct list_head rx_empty_list;
	struct list_head rx_pend_list;
	struct list_head rx_full_list;

	struct workqueue_struct *pshm_tx_workqueue;
	struct workqueue_struct *pshm_rx_workqueue;

	struct work_struct shm_tx_work;
	struct work_struct shm_rx_work;

	struct sk_buff_head sk_qhead;
	struct shmdev_layer *pshm_dev;
};

static int shm_netdev_open(struct net_device *shm_netdev)
{
	netif_wake_queue(shm_netdev);
	return 0;
}

static int shm_netdev_close(struct net_device *shm_netdev)
{
	netif_stop_queue(shm_netdev);
	return 0;
}

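/*
 * Mailbox callback, registered through pshmdev_mbxsetup() and invoked
 * when the peer signals buffer state changes. Filled RX buffers are moved
 * to rx_full_list and the RX work queue is scheduled; emptied TX buffers
 * are recycled to tx_empty_list, flow control is re-enabled when enough
 * of them are free, and the TX work queue is scheduled.
 */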
int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
{
	struct buf_list *pbuf;
	struct shmdrv_layer *pshm_drv;
	struct list_head *pos;
	u32 avail_emptybuff = 0;
	unsigned long flags = 0;

	pshm_drv = priv;

	/* Check for received buffers. */
	if (mbx_msg & SHM_FULL_MASK) {
		int idx;

		spin_lock_irqsave(&pshm_drv->lock, flags);

		/* Check whether we have any outstanding buffers. */
		if (list_empty(&pshm_drv->rx_empty_list)) {

			/* Release spin lock. */
			spin_unlock_irqrestore(&pshm_drv->lock, flags);

			/* We print even in IRQ context... */
			pr_warn("No empty Rx buffers to fill: "
					"mbx_msg:%x\n", mbx_msg);

			/* Bail out. */
			goto err_sync;
		}

		pbuf =
			list_entry(pshm_drv->rx_empty_list.next,
					struct buf_list, list);
		idx = pbuf->index;

		/* Check buffer synchronization. */
		if (idx != SHM_GET_FULL(mbx_msg)) {

			/* We print even in IRQ context... */
			pr_warn("phyif_shm_mbx_msg_cb: RX full out of sync:"
					" idx:%d, msg:%x SHM_GET_FULL(mbx_msg):%x\n",
					idx, mbx_msg, SHM_GET_FULL(mbx_msg));

			spin_unlock_irqrestore(&pshm_drv->lock, flags);

			/* Bail out. */
			goto err_sync;
		}

		list_del_init(&pbuf->list);
		list_add_tail(&pbuf->list, &pshm_drv->rx_full_list);

		spin_unlock_irqrestore(&pshm_drv->lock, flags);

		/* Schedule RX work queue. */
		if (!work_pending(&pshm_drv->shm_rx_work))
			queue_work(pshm_drv->pshm_rx_workqueue,
					&pshm_drv->shm_rx_work);
	}

	/* Check for emptied buffers. */
	if (mbx_msg & SHM_EMPTY_MASK) {
		int idx;

		spin_lock_irqsave(&pshm_drv->lock, flags);

		/* Check whether we have any outstanding buffers. */
		if (list_empty(&pshm_drv->tx_full_list)) {

			/* We print even in IRQ context... */
			pr_warn("No TX to empty: msg:%x\n", mbx_msg);

			spin_unlock_irqrestore(&pshm_drv->lock, flags);

			/* Bail out. */
			goto err_sync;
		}

		pbuf =
			list_entry(pshm_drv->tx_full_list.next,
					struct buf_list, list);
		idx = pbuf->index;

		/* Check buffer synchronization. */
		if (idx != SHM_GET_EMPTY(mbx_msg)) {

			spin_unlock_irqrestore(&pshm_drv->lock, flags);

			/* We print even in IRQ context... */
			pr_warn("TX empty out of sync: idx:%d, msg:%x\n",
					idx, mbx_msg);

			/* Bail out. */
			goto err_sync;
		}
		list_del_init(&pbuf->list);

		/* Reset buffer parameters. */
		pbuf->frames = 0;
		pbuf->frm_ofs = SHM_CAIF_FRM_OFS;

		list_add_tail(&pbuf->list, &pshm_drv->tx_empty_list);

		/* Count the available buffers in the empty list. */
		list_for_each(pos, &pshm_drv->tx_empty_list)
			avail_emptybuff++;

		/* Check whether we have to wake up the transmitter. */
		if ((avail_emptybuff > HIGH_WATERMARK) &&
				(!pshm_drv->tx_empty_available)) {
			pshm_drv->tx_empty_available = 1;
			spin_unlock_irqrestore(&pshm_drv->lock, flags);
			pshm_drv->cfdev.flowctrl
					(pshm_drv->pshm_dev->pshm_netdev,
					CAIF_FLOW_ON);

			/* Schedule the work queue if required. */
			if (!work_pending(&pshm_drv->shm_tx_work))
				queue_work(pshm_drv->pshm_tx_workqueue,
						&pshm_drv->shm_tx_work);
		} else
			spin_unlock_irqrestore(&pshm_drv->lock, flags);
	}

	return 0;

err_sync:
	return -EIO;
}

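/*
 * RX work function: for every buffer on rx_full_list, walk its packet
 * descriptor area, copy each CAIF frame into a freshly allocated skb and
 * hand it to the network stack, then park the buffer on rx_pend_list so
 * that the TX work function can report it empty to the peer.
 */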
static void shm_rx_work_func(struct work_struct *rx_work)
{
	struct shmdrv_layer *pshm_drv;
	struct buf_list *pbuf;
	unsigned long flags = 0;
	struct sk_buff *skb;
	char *p;
	int ret;

	pshm_drv = container_of(rx_work, struct shmdrv_layer, shm_rx_work);

	while (1) {

		struct shm_pck_desc *pck_desc;

		spin_lock_irqsave(&pshm_drv->lock, flags);

		/* Check for received buffers. */
		if (list_empty(&pshm_drv->rx_full_list)) {
			spin_unlock_irqrestore(&pshm_drv->lock, flags);
			break;
		}

		pbuf =
			list_entry(pshm_drv->rx_full_list.next,
					struct buf_list, list);
		list_del_init(&pbuf->list);
		spin_unlock_irqrestore(&pshm_drv->lock, flags);

		/* Retrieve pointer to start of the packet descriptor area. */
		pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;

		/*
		 * Check whether descriptor contains a CAIF shared memory
		 * frame.
		 */
		while (pck_desc->frm_ofs) {
			unsigned int frm_buf_ofs;
			unsigned int frm_pck_ofs;
			unsigned int frm_pck_len;
			/*
			 * Check whether offset is within buffer limits
			 * (lower).
			 */
			if (pck_desc->frm_ofs <
					(pbuf->phy_addr - pshm_drv->shm_base_addr))
				break;
			/*
			 * Check whether offset is within buffer limits
			 * (higher).
			 */
			if (pck_desc->frm_ofs >
					((pbuf->phy_addr - pshm_drv->shm_base_addr) +
					pbuf->len))
				break;

			/* Calculate offset from start of buffer. */
			frm_buf_ofs =
				pck_desc->frm_ofs - (pbuf->phy_addr -
						pshm_drv->shm_base_addr);

			/*
			 * Calculate offset and length of CAIF packet while
			 * taking care of the shared memory header.
			 */
			frm_pck_ofs =
				frm_buf_ofs + SHM_HDR_LEN +
					(*(pbuf->desc_vptr + frm_buf_ofs));
			frm_pck_len =
				(pck_desc->frm_len - SHM_HDR_LEN -
					(*(pbuf->desc_vptr + frm_buf_ofs)));

			/* Check whether CAIF packet is within buffer limits. */
			if ((frm_pck_ofs + pck_desc->frm_len) > pbuf->len)
				break;

			/* Get a suitable CAIF packet and copy in data. */
			skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev,
							frm_pck_len + 1);
			BUG_ON(skb == NULL);

			p = skb_put(skb, frm_pck_len);
			memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len);

			skb->protocol = htons(ETH_P_CAIF);
			skb_reset_mac_header(skb);
			skb->dev = pshm_drv->pshm_dev->pshm_netdev;

			/* Push received packet up the stack. */
			ret = netif_rx_ni(skb);

			if (!ret) {
				pshm_drv->pshm_dev->pshm_netdev->stats.
						rx_packets++;
				pshm_drv->pshm_dev->pshm_netdev->stats.
						rx_bytes += pck_desc->frm_len;
			} else
				++pshm_drv->pshm_dev->pshm_netdev->stats.
						rx_dropped;
			/* Move to next packet descriptor. */
			pck_desc++;
		}

		spin_lock_irqsave(&pshm_drv->lock, flags);
		list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);

		spin_unlock_irqrestore(&pshm_drv->lock, flags);

	}

	/* Schedule the TX work queue if required. */
	if (!work_pending(&pshm_drv->shm_tx_work))
		queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);

}

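/*
 * TX work function: reports processed RX buffers (rx_pend_list) back to
 * the peer as empty, fills free TX buffers with frames from the socket
 * buffer queue, marks them full and notifies the peer through a mailbox
 * message. Flow control is turned off when the number of free TX buffers
 * drops below LOW_WATERMARK.
 */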
static void shm_tx_work_func(struct work_struct *tx_work)
{
	u32 mbox_msg;
	unsigned int frmlen, avail_emptybuff, append = 0;
	unsigned long flags = 0;
	struct buf_list *pbuf = NULL;
	struct shmdrv_layer *pshm_drv;
	struct shm_caif_frm *frm;
	struct sk_buff *skb;
	struct shm_pck_desc *pck_desc;
	struct list_head *pos;

	pshm_drv = container_of(tx_work, struct shmdrv_layer, shm_tx_work);

	do {
		/* Initialize mailbox message. */
		mbox_msg = 0x00;
		avail_emptybuff = 0;

		spin_lock_irqsave(&pshm_drv->lock, flags);

		/* Check for pending receive buffers. */
		if (!list_empty(&pshm_drv->rx_pend_list)) {

			pbuf = list_entry(pshm_drv->rx_pend_list.next,
						struct buf_list, list);

			list_del_init(&pbuf->list);
			list_add_tail(&pbuf->list, &pshm_drv->rx_empty_list);
			/*
			 * Value index is never changed,
			 * so read access should be safe.
			 */
			mbox_msg |= SHM_SET_EMPTY(pbuf->index);
		}

		skb = skb_peek(&pshm_drv->sk_qhead);

		if (skb == NULL)
			goto send_msg;
		/* Count the available buffers in the empty list. */
		list_for_each(pos, &pshm_drv->tx_empty_list)
			avail_emptybuff++;

		if ((avail_emptybuff < LOW_WATERMARK) &&
				pshm_drv->tx_empty_available) {
			/* Update blocking condition. */
			pshm_drv->tx_empty_available = 0;
			spin_unlock_irqrestore(&pshm_drv->lock, flags);
			pshm_drv->cfdev.flowctrl
					(pshm_drv->pshm_dev->pshm_netdev,
					CAIF_FLOW_OFF);
			spin_lock_irqsave(&pshm_drv->lock, flags);
		}
		/*
		 * We simply return to the caller if we do not have space
		 * either in the Tx pending list or the Tx empty list. In this
		 * case, we hold the received skb in the skb list, waiting to
		 * be transmitted once Tx buffers become available.
		 */
		if (list_empty(&pshm_drv->tx_empty_list))
			goto send_msg;

		/* Get the first free Tx buffer. */
		pbuf = list_entry(pshm_drv->tx_empty_list.next,
						struct buf_list, list);
		do {
			if (append) {
				skb = skb_peek(&pshm_drv->sk_qhead);
				if (skb == NULL)
					break;
			}

			frm = (struct shm_caif_frm *)
					(pbuf->desc_vptr + pbuf->frm_ofs);

			frm->hdr_ofs = 0;
			frmlen = 0;
			frmlen += SHM_HDR_LEN + frm->hdr_ofs + skb->len;

			/* Add tail padding if needed. */
			if (frmlen % SHM_FRM_PAD_LEN)
				frmlen += SHM_FRM_PAD_LEN -
						(frmlen % SHM_FRM_PAD_LEN);

			/*
			 * Verify that packet, header and additional padding
			 * can fit within the buffer frame area.
			 */
			if (frmlen >= (pbuf->len - pbuf->frm_ofs))
				break;

			if (!append) {
				list_del_init(&pbuf->list);
				append = 1;
			}

			skb = skb_dequeue(&pshm_drv->sk_qhead);
			if (skb == NULL)
				break;
			/* Copy in CAIF frame. */
			skb_copy_bits(skb, 0, pbuf->desc_vptr +
					pbuf->frm_ofs + SHM_HDR_LEN +
					frm->hdr_ofs, skb->len);

			pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
			pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
					frmlen;
			dev_kfree_skb_irq(skb);

			/* Fill in the shared memory packet descriptor area. */
			pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
			/* Forward to current frame. */
			pck_desc += pbuf->frames;
			pck_desc->frm_ofs = (pbuf->phy_addr -
						pshm_drv->shm_base_addr) +
						pbuf->frm_ofs;
			pck_desc->frm_len = frmlen;
			/* Terminate packet descriptor area. */
			pck_desc++;
			pck_desc->frm_ofs = 0;
			/* Update buffer parameters. */
			pbuf->frames++;
			pbuf->frm_ofs += frmlen + (frmlen % 32);

		} while (pbuf->frames < SHM_MAX_FRMS_PER_BUF);

		/* Assign buffer as full. */
		list_add_tail(&pbuf->list, &pshm_drv->tx_full_list);
		append = 0;
		mbox_msg |= SHM_SET_FULL(pbuf->index);
send_msg:
		spin_unlock_irqrestore(&pshm_drv->lock, flags);

		if (mbox_msg)
			pshm_drv->pshm_dev->pshmdev_mbxsend
					(pshm_drv->pshm_dev->shm_id, mbox_msg);
	} while (mbox_msg);
}

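/*
 * Transmission is deferred: the skb is queued here and copied into a
 * shared memory TX buffer by the TX work function.
 */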
static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
{
	struct shmdrv_layer *pshm_drv;

	pshm_drv = netdev_priv(shm_netdev);

	skb_queue_tail(&pshm_drv->sk_qhead, skb);

	/* Schedule the Tx work queue for deferred processing of skbs. */
	if (!work_pending(&pshm_drv->shm_tx_work))
		queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);

	return 0;
}

static const struct net_device_ops netdev_ops = {
	.ndo_open = shm_netdev_open,
	.ndo_stop = shm_netdev_close,
	.ndo_start_xmit = shm_netdev_tx,
};

static void shm_netdev_setup(struct net_device *pshm_netdev)
{
	struct shmdrv_layer *pshm_drv;

	pshm_netdev->netdev_ops = &netdev_ops;

	pshm_netdev->mtu = CAIF_MAX_MTU;
	pshm_netdev->type = ARPHRD_CAIF;
	pshm_netdev->hard_header_len = CAIF_NEEDED_HEADROOM;
	pshm_netdev->tx_queue_len = 0;
	pshm_netdev->destructor = free_netdev;

	pshm_drv = netdev_priv(pshm_netdev);

	/* Initialize structures in a clean state. */
	memset(pshm_drv, 0, sizeof(struct shmdrv_layer));

	pshm_drv->cfdev.link_select = CAIF_LINK_LOW_LATENCY;
}

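/*
 * Probe function called by the shared memory device driver. It allocates
 * and sets up the CAIF network device, registers the mailbox callback,
 * verifies that the shared memory area is large enough, carves it into TX
 * and RX buffers (mapped with ioremap() unless running in loopback) and
 * finally registers the net device.
 */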
int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
{
	int result, j;
	struct shmdrv_layer *pshm_drv = NULL;

	pshm_dev->pshm_netdev = alloc_netdev(sizeof(struct shmdrv_layer),
						"cfshm%d", shm_netdev_setup);
	if (!pshm_dev->pshm_netdev)
		return -ENOMEM;

	pshm_drv = netdev_priv(pshm_dev->pshm_netdev);
	pshm_drv->pshm_dev = pshm_dev;

	/*
	 * Initialization starts with the verification of the
	 * availability of the MBX driver by calling its setup function.
	 * The MBX driver must be available by this time for proper
	 * functioning of the SHM driver.
	 */
	if ((pshm_dev->pshmdev_mbxsetup
			(caif_shmdrv_rx_cb, pshm_dev, pshm_drv)) != 0) {
		pr_warn("Could not configure SHM mailbox, bailing out\n");
		free_netdev(pshm_dev->pshm_netdev);
		return -ENODEV;
	}

	skb_queue_head_init(&pshm_drv->sk_qhead);

	pr_info("SHM DEVICE[%d] PROBED BY DRIVER, NEW SHM DRIVER"
			" INSTANCE AT pshm_drv = 0x%p\n",
			pshm_drv->pshm_dev->shm_id, pshm_drv);

	if (pshm_dev->shm_total_sz <
			(NR_TX_BUF * TX_BUF_SZ + NR_RX_BUF * RX_BUF_SZ)) {

		pr_warn("ERROR, amount of available physical SHM cannot "
				"accommodate current SHM driver configuration, "
				"bailing out ...\n");
		free_netdev(pshm_dev->pshm_netdev);
		return -ENOMEM;
	}

	pshm_drv->shm_base_addr = pshm_dev->shm_base_addr;
	pshm_drv->shm_tx_addr = pshm_drv->shm_base_addr;

	if (pshm_dev->shm_loopback)
		pshm_drv->shm_rx_addr = pshm_drv->shm_tx_addr;
	else
		pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
						(NR_TX_BUF * TX_BUF_SZ);

	spin_lock_init(&pshm_drv->lock);
	INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
	INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
	INIT_LIST_HEAD(&pshm_drv->tx_full_list);

	INIT_LIST_HEAD(&pshm_drv->rx_empty_list);
	INIT_LIST_HEAD(&pshm_drv->rx_pend_list);
	INIT_LIST_HEAD(&pshm_drv->rx_full_list);

	INIT_WORK(&pshm_drv->shm_tx_work, shm_tx_work_func);
	INIT_WORK(&pshm_drv->shm_rx_work, shm_rx_work_func);

	pshm_drv->pshm_tx_workqueue =
			create_singlethread_workqueue("shm_tx_work");
	pshm_drv->pshm_rx_workqueue =
			create_singlethread_workqueue("shm_rx_work");

	for (j = 0; j < NR_TX_BUF; j++) {
		struct buf_list *tx_buf =
				kmalloc(sizeof(struct buf_list), GFP_KERNEL);

		if (tx_buf == NULL) {
			pr_warn("ERROR, could not allocate memory for tx_buf,"
					" bailing out ...\n");
			free_netdev(pshm_dev->pshm_netdev);
			return -ENOMEM;
		}
		tx_buf->index = j;
		tx_buf->phy_addr = pshm_drv->shm_tx_addr + (TX_BUF_SZ * j);
		tx_buf->len = TX_BUF_SZ;
		tx_buf->frames = 0;
		tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;

		if (pshm_dev->shm_loopback)
			tx_buf->desc_vptr = (unsigned char *)tx_buf->phy_addr;
		else
			tx_buf->desc_vptr =
					ioremap(tx_buf->phy_addr, TX_BUF_SZ);

		list_add_tail(&tx_buf->list, &pshm_drv->tx_empty_list);
	}

	for (j = 0; j < NR_RX_BUF; j++) {
		struct buf_list *rx_buf =
				kmalloc(sizeof(struct buf_list), GFP_KERNEL);

		if (rx_buf == NULL) {
			pr_warn("ERROR, could not allocate memory for rx_buf,"
					" bailing out ...\n");
			free_netdev(pshm_dev->pshm_netdev);
			return -ENOMEM;
		}
		rx_buf->index = j;
		rx_buf->phy_addr = pshm_drv->shm_rx_addr + (RX_BUF_SZ * j);
		rx_buf->len = RX_BUF_SZ;

		if (pshm_dev->shm_loopback)
			rx_buf->desc_vptr = (unsigned char *)rx_buf->phy_addr;
		else
			rx_buf->desc_vptr =
					ioremap(rx_buf->phy_addr, RX_BUF_SZ);
		list_add_tail(&rx_buf->list, &pshm_drv->rx_empty_list);
	}

	pshm_drv->tx_empty_available = 1;
	result = register_netdev(pshm_dev->pshm_netdev);
	if (result)
		pr_warn("ERROR[%d], SHM could not register with the network"
				" framework, bailing out ...\n", result);

	return result;
}

void caif_shmcore_remove(struct net_device *pshm_netdev)
{
	struct buf_list *pbuf;
	struct shmdrv_layer *pshm_drv = NULL;

	pshm_drv = netdev_priv(pshm_netdev);

	while (!(list_empty(&pshm_drv->tx_pend_list))) {
		pbuf =
			list_entry(pshm_drv->tx_pend_list.next,
					struct buf_list, list);

		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->tx_full_list))) {
		pbuf =
			list_entry(pshm_drv->tx_full_list.next,
					struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->tx_empty_list))) {
		pbuf =
			list_entry(pshm_drv->tx_empty_list.next,
					struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->rx_full_list))) {
		pbuf =
			list_entry(pshm_drv->rx_full_list.next,
					struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->rx_pend_list))) {
		pbuf =
			list_entry(pshm_drv->rx_pend_list.next,
					struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->rx_empty_list))) {
		pbuf =
			list_entry(pshm_drv->rx_empty_list.next,
					struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	/* Destroy work queues. */
	destroy_workqueue(pshm_drv->pshm_tx_workqueue);
	destroy_workqueue(pshm_drv->pshm_rx_workqueue);

	unregister_netdev(pshm_netdev);
}