/* Copyright (c) 2011-2016, 2018 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

/*
 * RMNET BAM Module.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/msm_rmnet.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>

#include <net/pkt_sched.h>

#include <soc/qcom/bam_dmux.h>

/* Debug message support */
static int msm_rmnet_bam_debug_mask;
module_param_named(debug_enable, msm_rmnet_bam_debug_mask, int, 0664);
/* S_IRUGO | S_IWUSR | S_IWGRP */

static unsigned long msm_rmnet_bam_headroom_check_failure;
module_param(msm_rmnet_bam_headroom_check_failure, ulong, 0444); /* S_IRUGO */
MODULE_PARM_DESC(msm_rmnet_bam_headroom_check_failure,
		 "Number of packets with insufficient headroom");

/*
 * Packet threshold: when > 1, only every Nth RX packet is delivered with
 * netif_rx_ni(); the rest are delivered with netif_rx().
 */
static unsigned int pkt_threshold = 1;
module_param(pkt_threshold, uint, 0664); /* S_IRUGO | S_IWUSR | S_IWGRP */

#define DEBUG_MASK_LVL0 (1U << 0)
#define DEBUG_MASK_LVL1 (1U << 1)
#define DEBUG_MASK_LVL2 (1U << 2)

#define DBG(m, x...) do { \
		if (msm_rmnet_bam_debug_mask & m) \
			pr_info(x); \
} while (0)
#define DBG0(x...) DBG(DEBUG_MASK_LVL0, x)
#define DBG1(x...) DBG(DEBUG_MASK_LVL1, x)
#define DBG2(x...) DBG(DEBUG_MASK_LVL2, x)

/* allow larger frames */
#define RMNET_DATA_LEN 2000

#define RMNET_BAM_DRIVER_NAME "rmnet_bam"

#define DEVICE_ID_INVALID   -1

#define DEVICE_INACTIVE      2
#define DEVICE_ACTIVE        1
#define DEVICE_UNINITIALIZED 0

#define HEADROOM_FOR_BAM 8 /* for mux header */
#define HEADROOM_FOR_QOS 8
#define TAILROOM         8 /* for padding by mux layer */

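/*
 * Per-device driver state, kept in netdev_priv(). waiting_for_ul_skb holds a
 * single TX skb that arrived while the BAM uplink was still powering up; it
 * is flushed from bam_notify() when BAM_DMUX_UL_CONNECTED is received.
 */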
struct rmnet_private {
	struct net_device_stats stats;
	uint32_t ch_id;
#ifdef CONFIG_MSM_RMNET_DEBUG
	ktime_t last_packet;
	unsigned long wakeups_xmit;
	unsigned long wakeups_rcv;
	unsigned long timeout_us;
#endif
	struct sk_buff *waiting_for_ul_skb;
	spinlock_t lock;
	spinlock_t tx_queue_lock;
	struct tasklet_struct tsklt;
	u32 operation_mode; /* IOCTL specified mode (protocol, QoS header) */
	uint8_t device_up;
	uint8_t in_reset;
};

struct rmnet_free_bam_work {
	struct work_struct work;
	uint32_t ch_id;
};

#ifdef CONFIG_MSM_RMNET_DEBUG
static unsigned long timeout_us;

/* Returns 1 if packet caused rmnet to wakeup, 0 otherwise. */
static int rmnet_cause_wakeup(struct rmnet_private *p)
{
	int ret = 0;
	ktime_t now;

	if (p->timeout_us == 0) /* Check if disabled */
		return 0;

	/* Use real (wall) time. */
	now = ktime_get_real();

	if (ktime_us_delta(now, p->last_packet) > p->timeout_us)
		ret = 1;

	p->last_packet = now;
	return ret;
}

static ssize_t wakeups_xmit_show(struct device *d,
				 struct device_attribute *attr,
				 char *buf)
{
	struct rmnet_private *p = netdev_priv(to_net_dev(d));

	return snprintf(buf, PAGE_SIZE, "%lu\n", p->wakeups_xmit);
}

DEVICE_ATTR(wakeups_xmit, 0444, wakeups_xmit_show, NULL);

static ssize_t wakeups_rcv_show(struct device *d, struct device_attribute *attr,
				char *buf)
{
	struct rmnet_private *p = netdev_priv(to_net_dev(d));

	return snprintf(buf, PAGE_SIZE, "%lu\n", p->wakeups_rcv);
}

DEVICE_ATTR(wakeups_rcv, 0444, wakeups_rcv_show, NULL);

/* Set timeout in us. */
static ssize_t timeout_store(struct device *d, struct device_attribute *attr,
			     const char *buf, size_t n)
{
	struct rmnet_private *p = netdev_priv(to_net_dev(d));
	int ret;

	ret = kstrtoul(buf, 0, &timeout_us);
	if (ret < 0)
		return n;
	p->timeout_us = timeout_us;
	return n;
}

static ssize_t timeout_show(struct device *d, struct device_attribute *attr,
			    char *buf)
{
	/* The timeout is tracked module-wide; report the last value stored. */
	return snprintf(buf, PAGE_SIZE, "%lu\n", timeout_us);
}

DEVICE_ATTR(timeout, 0664, timeout_show, timeout_store);
#endif


/* Forward declaration */
static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static struct platform_driver bam_rmnet_drivers[BAM_DMUX_NUM_CHANNELS];

static struct net_device *netdevs[BAM_DMUX_NUM_CHANNELS];

static __be16 rmnet_ip_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	__be16 protocol = 0;

	skb->dev = dev;

	/* Determine L3 protocol */
	switch (skb->data[0] & 0xf0) {
	case 0x40:
		protocol = htons(ETH_P_IP);
		break;
	case 0x60:
		protocol = htons(ETH_P_IPV6);
		break;
	default:
		protocol = htons(ETH_P_MAP);
	}
	return protocol;
}

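/* ARP frames are not counted in the interface statistics in Ethernet mode. */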
static int count_this_packet(void *_hdr, int len)
{
	struct ethhdr *hdr = _hdr;

	if (len >= ETH_HLEN && hdr->h_proto == htons(ETH_P_ARP))
		return 0;

	return 1;
}

/* Rx Callback, Called in Work Queue context */
static void bam_recv_notify(void *dev, struct sk_buff *skb)
{
	struct rmnet_private *p = netdev_priv(dev);
	unsigned long flags;
	u32 opmode;

	if (skb) {
		skb->dev = dev;
		/* Handle Rx frame format */
		spin_lock_irqsave(&p->lock, flags);
		opmode = p->operation_mode;
		spin_unlock_irqrestore(&p->lock, flags);

		if (RMNET_IS_MODE_IP(opmode)) {
			/* Driver in IP mode */
			skb->protocol = rmnet_ip_type_trans(skb, dev);
		} else {
			/* Driver in Ethernet mode */
			skb->protocol = eth_type_trans(skb, dev);
		}
		if (RMNET_IS_MODE_IP(opmode) ||
		    count_this_packet(skb->data, skb->len)) {
#ifdef CONFIG_MSM_RMNET_DEBUG
			p->wakeups_rcv += rmnet_cause_wakeup(p);
#endif
			p->stats.rx_packets++;
			p->stats.rx_bytes += skb->len;
		}
		DBG1("[%s] Rx packet #%lu len=%d\n",
		     ((struct net_device *)dev)->name,
		     p->stats.rx_packets, skb->len);

		/* Deliver to network stack */
		if (pkt_threshold == 1) {
			netif_rx_ni(skb);
		} else {
			/* For every nth packet, use netif_rx_ni(). */
			if (p->stats.rx_packets % pkt_threshold == 0)
				netif_rx_ni(skb);
			else
				netif_rx(skb);
		}
	} else {
		pr_err("[%s] %s: No skb received\n",
		       ((struct net_device *)dev)->name, __func__);
	}
}

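/*
 * Make sure the skb has enough headroom for the BAM mux (and optional QoS)
 * header, reallocating it if necessary. Returns NULL if the reallocation
 * fails; the original skb has already been freed in that case.
 */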
static struct sk_buff *_rmnet_add_headroom(struct sk_buff **skb,
					   struct net_device *dev)
{
	struct sk_buff *skbn;

	if (skb_headroom(*skb) < dev->needed_headroom) {
		msm_rmnet_bam_headroom_check_failure++;
		skbn = skb_realloc_headroom(*skb, dev->needed_headroom);
		kfree_skb(*skb);
		*skb = skbn;
	} else {
		skbn = *skb;
	}

	return skbn;
}

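/*
 * Hand one skb to the BAM DMUX channel. In QoS mode a QMI QoS header carrying
 * skb->mark as the flow ID is pushed first; on a fatal write error the header
 * is popped again and -EPERM is returned so the caller can report
 * NETDEV_TX_BUSY.
 */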
static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	int bam_ret;
	struct QMI_QOS_HDR_S *qmih;
	u32 opmode;
	unsigned long flags;

	if (unlikely(!_rmnet_add_headroom(&skb, dev))) {
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	/* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
	spin_lock_irqsave(&p->lock, flags);
	opmode = p->operation_mode;
	spin_unlock_irqrestore(&p->lock, flags);

	if (RMNET_IS_MODE_QOS(opmode)) {
		qmih = (struct QMI_QOS_HDR_S *)
			skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
		qmih->version = 1;
		qmih->flags = 0;
		qmih->flow_id = skb->mark;
	}

	/* if write() succeeds, skb access is unsafe in this process */
	bam_ret = msm_bam_dmux_write(p->ch_id, skb);

	if (bam_ret != 0 && bam_ret != -EAGAIN && bam_ret != -EFAULT) {
		pr_err("[%s] %s: write returned error %d\n",
		       dev->name, __func__, bam_ret);
		if (RMNET_IS_MODE_QOS(opmode))
			skb_pull(skb, sizeof(struct QMI_QOS_HDR_S));
		return -EPERM;
	}

	return bam_ret;
}

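/*
 * TX-complete callback from BAM DMUX: account the packet, free the skb and
 * wake the netif queue once the channel has drained below its low watermark.
 */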
static void bam_write_done(void *dev, struct sk_buff *skb)
{
	struct rmnet_private *p = netdev_priv(dev);
	u32 opmode = p->operation_mode;
	unsigned long flags;

	DBG1("%s: write complete\n", __func__);
	if (RMNET_IS_MODE_IP(opmode) ||
	    count_this_packet(skb->data, skb->len)) {
		p->stats.tx_packets++;
		p->stats.tx_bytes += skb->len;
#ifdef CONFIG_MSM_RMNET_DEBUG
		p->wakeups_xmit += rmnet_cause_wakeup(p);
#endif
	}
	DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n",
	     ((struct net_device *)(dev))->name, p->stats.tx_packets,
	     skb->len, skb->mark);
	dev_kfree_skb_any(skb);

	spin_lock_irqsave(&p->tx_queue_lock, flags);
	if (netif_queue_stopped(dev) &&
	    msm_bam_dmux_is_ch_low(p->ch_id)) {
		DBG0("%s: Low WM hit, waking queue=%pK\n",
		     __func__, skb);
		netif_wake_queue(dev);
	}
	spin_unlock_irqrestore(&p->tx_queue_lock, flags);
}

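/*
 * Event callback registered with msm_bam_dmux_open(). Dispatches RX data and
 * TX completions, and flushes any skb parked in waiting_for_ul_skb once the
 * uplink reports BAM_DMUX_UL_CONNECTED.
 */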
static void bam_notify(void *dev, int event, unsigned long data)
{
	struct rmnet_private *p = netdev_priv(dev);
	unsigned long flags;

	switch (event) {
	case BAM_DMUX_RECEIVE:
		bam_recv_notify(dev, (struct sk_buff *)(data));
		break;
	case BAM_DMUX_WRITE_DONE:
		bam_write_done(dev, (struct sk_buff *)(data));
		break;
	case BAM_DMUX_UL_CONNECTED:
		spin_lock_irqsave(&p->lock, flags);
		if (p->waiting_for_ul_skb != NULL) {
			struct sk_buff *skb;
			int ret;

			skb = p->waiting_for_ul_skb;
			p->waiting_for_ul_skb = NULL;
			spin_unlock_irqrestore(&p->lock, flags);
			ret = _rmnet_xmit(skb, dev);
			if (ret) {
				pr_err("%s: error %d dropping delayed TX SKB %pK\n",
				       __func__, ret, skb);
				dev_kfree_skb_any(skb);
			}
			netif_wake_queue(dev);
		} else {
			spin_unlock_irqrestore(&p->lock, flags);
		}
		break;
	case BAM_DMUX_UL_DISCONNECTED:
		break;
	}
}

static int __rmnet_open(struct net_device *dev)
{
	int r;
	struct rmnet_private *p = netdev_priv(dev);

	DBG0("[%s] __rmnet_open()\n", dev->name);

	if (p->device_up == DEVICE_UNINITIALIZED) {
		r = msm_bam_dmux_open(p->ch_id, dev, bam_notify);
		if (r < 0) {
			DBG0("%s: ch=%d failed with rc %d\n",
			     __func__, p->ch_id, r);
			return -ENODEV;
		}
	}

	p->device_up = DEVICE_ACTIVE;
	return 0;
}

static int rmnet_open(struct net_device *dev)
{
	int rc = 0;

	DBG0("[%s] rmnet_open()\n", dev->name);

	rc = __rmnet_open(dev);

	if (rc == 0)
		netif_start_queue(dev);

	return rc;
}


static int __rmnet_close(struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	int rc = 0;

	if (p->device_up == DEVICE_ACTIVE) {
		/* do not close rmnet port once up, this causes
		 * remote side to hang if tried to open again
		 */
		p->device_up = DEVICE_INACTIVE;
		return rc;
	} else {
		return -EBADF;
	}
}


static int rmnet_stop(struct net_device *dev)
{
	DBG0("[%s] rmnet_stop()\n", dev->name);

	__rmnet_close(dev);
	netif_stop_queue(dev);

	return 0;
}

static int rmnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < 0 || new_mtu > RMNET_DATA_LEN)
		return -EINVAL;

	DBG0("[%s] MTU change: old=%d new=%d\n",
	     dev->name, dev->mtu, new_mtu);
	dev->mtu = new_mtu;

	return 0;
}

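/*
 * ndo_start_xmit handler. Votes to keep the BAM uplink powered; if the uplink
 * is still waking up, the skb is parked in waiting_for_ul_skb and the queue
 * is stopped until bam_notify() sees BAM_DMUX_UL_CONNECTED.
 */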
static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	unsigned long flags;
	int awake;
	int ret = 0;

	if (netif_queue_stopped(dev)) {
		pr_err("[%s] debug: rmnet_xmit called when netif_queue is stopped\n",
		       dev->name);
		return NETDEV_TX_BUSY;
	}

	spin_lock_irqsave(&p->lock, flags);
	awake = msm_bam_dmux_ul_power_vote();
	if (!awake) {
		/* send SKB once wakeup is complete */
		netif_stop_queue(dev);
		p->waiting_for_ul_skb = skb;
		spin_unlock_irqrestore(&p->lock, flags);
		ret = 0;
		goto exit;
	}
	spin_unlock_irqrestore(&p->lock, flags);

	ret = _rmnet_xmit(skb, dev);
	if (ret == -EPERM) {
		ret = NETDEV_TX_BUSY;
		goto exit;
	}

	/*
	 * detected SSR a bit early. shut some things down now, and leave
	 * the rest to the main ssr handling code when that happens later
	 */
	if (ret == -EFAULT) {
		netif_carrier_off(dev);
		dev_kfree_skb_any(skb);
		ret = 0;
		goto exit;
	}

	if (ret == -EAGAIN) {
		/*
		 * This should not happen.
		 * EAGAIN means we attempted to overflow the high watermark.
		 * Clearly the queue is not stopped like it should be, so
		 * stop it and return BUSY to the TCP/IP framework. It will
		 * retry this packet when the queue is restarted, which happens
		 * in the write_done callback when the low watermark is hit.
		 */
		netif_stop_queue(dev);
		ret = NETDEV_TX_BUSY;
		goto exit;
	}

	spin_lock_irqsave(&p->tx_queue_lock, flags);
	if (msm_bam_dmux_is_ch_full(p->ch_id)) {
		netif_stop_queue(dev);
		DBG0("%s: High WM hit, stopping queue=%pK\n", __func__, skb);
	}
	spin_unlock_irqrestore(&p->tx_queue_lock, flags);

exit:
	msm_bam_dmux_ul_power_unvote();
	return ret;
}

static struct net_device_stats *rmnet_get_stats(struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);

	return &p->stats;
}

static void rmnet_tx_timeout(struct net_device *dev)
{
	pr_warn("[%s] rmnet_tx_timeout()\n", dev->name);
}

static const struct net_device_ops rmnet_ops_ether = {
	.ndo_open = rmnet_open,
	.ndo_stop = rmnet_stop,
	.ndo_start_xmit = rmnet_xmit,
	.ndo_get_stats = rmnet_get_stats,
	.ndo_tx_timeout = rmnet_tx_timeout,
	.ndo_do_ioctl = rmnet_ioctl,
	.ndo_change_mtu = rmnet_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

static const struct net_device_ops rmnet_ops_ip = {
	.ndo_open = rmnet_open,
	.ndo_stop = rmnet_stop,
	.ndo_start_xmit = rmnet_xmit,
	.ndo_get_stats = rmnet_get_stats,
	.ndo_tx_timeout = rmnet_tx_timeout,
	.ndo_do_ioctl = rmnet_ioctl,
	.ndo_change_mtu = rmnet_change_mtu,
	.ndo_set_mac_address = NULL,
	.ndo_validate_addr = NULL,
};

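/*
 * Deferred work for RMNET_IOCTL_DEREGISTER_DEV: unregisters the per-channel
 * platform driver outside of ioctl context and frees the work item.
 */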
static void _rmnet_free_bam_later(struct work_struct *work)
{
	struct rmnet_free_bam_work *fwork;

	fwork = container_of(work, struct rmnet_free_bam_work, work);

	DBG0("%s: unregister_netdev, done\n", __func__);

	if (bam_rmnet_drivers[fwork->ch_id].remove) {
		platform_driver_unregister(&bam_rmnet_drivers[fwork->ch_id]);
		bam_rmnet_drivers[fwork->ch_id].remove = NULL;
	}

	DBG0("%s: free_netdev, done\n", __func__);

	kfree(work);
}

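/*
 * Extended ioctls: the rmnet_ioctl_extended_s block is copied in from user
 * space, the sub-command is handled, and the (possibly updated) block is
 * copied back out.
 */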
static int rmnet_ioctl_extended(struct net_device *dev, struct ifreq *ifr)
{
	struct rmnet_ioctl_extended_s ext_cmd;
	int rc = 0;
	struct rmnet_private *p = netdev_priv(dev);
	struct rmnet_free_bam_work *work;

	rc = copy_from_user(&ext_cmd, ifr->ifr_ifru.ifru_data,
			    sizeof(ext_cmd));

	if (rc) {
		pr_err("%s: copy_from_user failed, error %d\n", __func__, rc);
		return -EFAULT;
	}

	switch (ext_cmd.extended_ioctl) {
	case RMNET_IOCTL_SET_MRU:
		/* Transport MRU is fixed, so do nothing */
		break;
	case RMNET_IOCTL_GET_EPID:
		ext_cmd.u.data = p->ch_id;
		break;
	case RMNET_IOCTL_GET_SUPPORTED_FEATURES:
		ext_cmd.u.data = 0;
		break;
	case RMNET_IOCTL_GET_DRIVER_NAME:
		strlcpy(ext_cmd.u.if_name, RMNET_BAM_DRIVER_NAME,
			sizeof(ext_cmd.u.if_name));
		break;
	case RMNET_IOCTL_DEREGISTER_DEV:
		work = kmalloc(sizeof(*work), GFP_KERNEL);
		if (!work)
			break;
		INIT_WORK(&work->work, _rmnet_free_bam_later);

		work->ch_id = p->ch_id;
		schedule_work(&work->work);
		break;
	default:
		rc = -EINVAL;
		break;
	}

	rc = copy_to_user(ifr->ifr_ifru.ifru_data, &ext_cmd, sizeof(ext_cmd));

	if (rc)
		pr_err("%s: copy_to_user failed, error %d\n", __func__, rc);

	return rc ? -EFAULT : 0;
}

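/*
 * Standard RMNET ioctls: switch the link protocol between Ethernet and raw
 * IP, enable/disable the QMI QoS header, control per-flow qdisc state, and
 * open or close the transport port.
 */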
static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct rmnet_private *p = netdev_priv(dev);
	u32 old_opmode = p->operation_mode;
	unsigned long flags;
	int prev_mtu = dev->mtu;
	int rc = 0;
	struct rmnet_ioctl_data_s ioctl_data;

	/* Process IOCTL command */
	switch (cmd) {
	case RMNET_IOCTL_SET_LLP_ETHERNET:	/* Set Ethernet protocol */
		/* Perform Ethernet config only if in IP mode currently */
		if (p->operation_mode & RMNET_MODE_LLP_IP) {
			ether_setup(dev);
			random_ether_addr(dev->dev_addr);
			dev->mtu = prev_mtu;

			dev->netdev_ops = &rmnet_ops_ether;
			spin_lock_irqsave(&p->lock, flags);
			p->operation_mode &= ~RMNET_MODE_LLP_IP;
			p->operation_mode |= RMNET_MODE_LLP_ETH;
			spin_unlock_irqrestore(&p->lock, flags);
			DBG0("[%s] rmnet_ioctl(): set Ethernet protocol mode\n",
			     dev->name);
		}
		break;

	case RMNET_IOCTL_SET_LLP_IP:		/* Set RAWIP protocol */
		/* Perform IP config only if in Ethernet mode currently */
		if (p->operation_mode & RMNET_MODE_LLP_ETH) {

			/* Undo config done in ether_setup() */
			dev->header_ops = NULL;  /* No header */
			dev->type = ARPHRD_RAWIP;
			dev->hard_header_len = 0;
			dev->mtu = prev_mtu;
			dev->addr_len = 0;
			dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);

			dev->needed_headroom = HEADROOM_FOR_BAM +
						HEADROOM_FOR_QOS;
			dev->needed_tailroom = TAILROOM;
			dev->netdev_ops = &rmnet_ops_ip;
			spin_lock_irqsave(&p->lock, flags);
			p->operation_mode &= ~RMNET_MODE_LLP_ETH;
			p->operation_mode |= RMNET_MODE_LLP_IP;
			spin_unlock_irqrestore(&p->lock, flags);
			DBG0("[%s] rmnet_ioctl(): set IP protocol mode\n",
			     dev->name);
		}
		break;

	case RMNET_IOCTL_GET_LLP:		/* Get link protocol state */
		ioctl_data.u.operation_mode = (p->operation_mode &
				(RMNET_MODE_LLP_ETH | RMNET_MODE_LLP_IP));
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
				 sizeof(struct rmnet_ioctl_data_s)))
			rc = -EFAULT;
		break;

	case RMNET_IOCTL_SET_QOS_ENABLE:	/* Set QoS header enabled */
		spin_lock_irqsave(&p->lock, flags);
		p->operation_mode |= RMNET_MODE_QOS;
		spin_unlock_irqrestore(&p->lock, flags);
		DBG0("[%s] rmnet_ioctl(): set QMI QOS header enable\n",
		     dev->name);
		break;

	case RMNET_IOCTL_SET_QOS_DISABLE:	/* Set QoS header disabled */
		spin_lock_irqsave(&p->lock, flags);
		p->operation_mode &= ~RMNET_MODE_QOS;
		spin_unlock_irqrestore(&p->lock, flags);
		DBG0("[%s] rmnet_ioctl(): set QMI QOS header disable\n",
		     dev->name);
		break;

	case RMNET_IOCTL_FLOW_ENABLE:
		if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
				   sizeof(struct rmnet_ioctl_data_s))) {
			rc = -EFAULT;
			break;
		}
		tc_qdisc_flow_control(dev, ioctl_data.u.tcm_handle, 1);
		DBG0("[%s] rmnet_ioctl(): enabled flow\n", dev->name);
		break;

	case RMNET_IOCTL_FLOW_DISABLE:
		if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
				   sizeof(struct rmnet_ioctl_data_s))) {
			rc = -EFAULT;
			break;
		}
		tc_qdisc_flow_control(dev, ioctl_data.u.tcm_handle, 0);
		DBG0("[%s] rmnet_ioctl(): disabled flow\n", dev->name);
		break;

	case RMNET_IOCTL_GET_QOS:		/* Get QoS header state */
		ioctl_data.u.operation_mode = (p->operation_mode &
						RMNET_MODE_QOS);
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
				 sizeof(struct rmnet_ioctl_data_s)))
			rc = -EFAULT;
		break;

	case RMNET_IOCTL_GET_OPMODE:		/* Get operation mode */
		ioctl_data.u.operation_mode = p->operation_mode;
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
				 sizeof(struct rmnet_ioctl_data_s)))
			rc = -EFAULT;
		break;

	case RMNET_IOCTL_OPEN:			/* Open transport port */
		rc = __rmnet_open(dev);
		DBG0("[%s] rmnet_ioctl(): open transport port\n",
		     dev->name);
		break;

	case RMNET_IOCTL_CLOSE:			/* Close transport port */
		rc = __rmnet_close(dev);
		DBG0("[%s] rmnet_ioctl(): close transport port\n",
		     dev->name);
		break;

	case RMNET_IOCTL_EXTENDED:		/* Extended IOCTLs */
		rc = rmnet_ioctl_extended(dev, ifr);
		break;

	default:
		pr_err("[%s] error: rmnet_ioctl called for unsupported cmd[%x]\n",
		       dev->name, cmd);
		return -EINVAL;
	}

	DBG2("[%s] %s: cmd=0x%x opmode old=0x%08x new=0x%08x\n",
	     dev->name, __func__, cmd, old_opmode, p->operation_mode);
	return rc;
}

static void rmnet_setup(struct net_device *dev)
{
	/* Using Ethernet mode by default */
	dev->netdev_ops = &rmnet_ops_ether;
	ether_setup(dev);

	/* set this after calling ether_setup */
	dev->mtu = RMNET_DATA_LEN;
	dev->needed_headroom = HEADROOM_FOR_BAM + HEADROOM_FOR_QOS;
	dev->needed_tailroom = TAILROOM;
	random_ether_addr(dev->dev_addr);

	dev->watchdog_timeo = 1000; /* 1000 jiffies (10 s at HZ=100) */
}


#ifdef CONFIG_MSM_RMNET_DEBUG
static int rmnet_debug_init(struct net_device *dev)
{
	struct device *d;
	struct rmnet_private *p;
	int err = 0;

	d = &(dev->dev);
	p = netdev_priv(dev);
	p->timeout_us = 0;
	p->wakeups_xmit = p->wakeups_rcv = 0;
	err = device_create_file(d, &dev_attr_timeout);
	if (err)
		return err;
	err = device_create_file(d, &dev_attr_wakeups_xmit);
	if (err)
		return err;
	err = device_create_file(d, &dev_attr_wakeups_rcv);
	return err;
}
#else
static int rmnet_debug_init(struct net_device *dev)
{
	return 0;
}
#endif

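/*
 * Platform probe for one BAM DMUX data channel. The channel index is derived
 * from the device name ("bam_dmux_ch_%d"); forward channels become rmnet%d
 * net devices and reverse channels become rev_rmnet%d.
 */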
static int bam_rmnet_probe(struct platform_device *pdev)
{
	int i, ret;
	struct rmnet_private *p;
	struct device *d;
	char name[BAM_DMUX_CH_NAME_MAX_LEN];
	struct net_device *dev;
	const char *dev_name;

	for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) {
		scnprintf(name, BAM_DMUX_CH_NAME_MAX_LEN, "bam_dmux_ch_%d", i);
		if (!strcmp(pdev->name, name))
			break;
	}

	if (((i > BAM_DMUX_DATA_RMNET_7) && (i < BAM_DMUX_DATA_REV_RMNET_0)) ||
	    (i >= BAM_DMUX_NUM_CHANNELS)) {
		pr_err("%s: wrong netdev %s\n", __func__, pdev->name);
		return -ENODEV;
	}

	if (i <= BAM_DMUX_DATA_RMNET_7)
		dev_name = "rmnet%d";
	else
		dev_name = "rev_rmnet%d";

	dev = alloc_netdev(sizeof(*p), dev_name, NET_NAME_ENUM, rmnet_setup);
	if (!dev) {
		pr_err("%s: no memory for netdev %d\n", __func__, i);
		return -ENOMEM;
	}

	netdevs[i] = dev;
	d = &(dev->dev);
	p = netdev_priv(dev);
	/* Initial config uses Ethernet */
	p->operation_mode = RMNET_MODE_LLP_ETH;
	p->ch_id = i;
	p->waiting_for_ul_skb = NULL;
	p->device_up = DEVICE_UNINITIALIZED;
	spin_lock_init(&p->lock);
	spin_lock_init(&p->tx_queue_lock);

	ret = register_netdev(dev);
	if (ret) {
		pr_err("%s: unable to register netdev %d rc=%d\n",
		       __func__, i, ret);
		netdevs[i] = NULL;
		free_netdev(dev);
		return ret;
	}

	rmnet_debug_init(dev);

	return 0;
}

static int bam_rmnet_remove(struct platform_device *pdev)
{
	int i;
	struct rmnet_private *p;
	char name[BAM_DMUX_CH_NAME_MAX_LEN];

	for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) {
		scnprintf(name, BAM_DMUX_CH_NAME_MAX_LEN, "bam_dmux_ch_%d", i);
		if (!strcmp(pdev->name, name))
			break;
	}

	if (((i > BAM_DMUX_DATA_RMNET_7) && (i < BAM_DMUX_DATA_REV_RMNET_0)) ||
	    (i >= BAM_DMUX_NUM_CHANNELS)) {
		pr_err("%s: wrong netdev %s\n", __func__, pdev->name);
		return -ENODEV;
	}

	p = netdev_priv(netdevs[i]);
	if (p->waiting_for_ul_skb != NULL) {
		dev_kfree_skb_any(p->waiting_for_ul_skb);
		p->waiting_for_ul_skb = NULL;
	}
	msm_bam_dmux_close(p->ch_id);
	netif_carrier_off(netdevs[i]);
	netif_stop_queue(netdevs[i]);

	unregister_netdev(netdevs[i]);
	free_netdev(netdevs[i]);

	return 0;
}

#ifdef CONFIG_MSM_RMNET_DEBUG
static void rmnet_clear_timeout_us(void)
{
	timeout_us = 0;
}
#else
static void rmnet_clear_timeout_us(void)
{
	/* Do nothing */
}
#endif /* CONFIG_MSM_RMNET_DEBUG */

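/*
 * Module init: register one platform driver per BAM DMUX data channel,
 * skipping the gap between the forward and reverse channel ranges.
 * bam_rmnet_probe() then creates the matching net device when the
 * corresponding bam_dmux_ch_%d platform device appears.
 */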
static int __init rmnet_init(void)
{
	unsigned int n;
	char *tempname;

	rmnet_clear_timeout_us();

	n = 0;
	while (n <= BAM_DMUX_DATA_REV_RMNET_8) {
		if ((n > BAM_DMUX_DATA_RMNET_7) &&
		    (n < BAM_DMUX_DATA_REV_RMNET_0)) {
			n++;
			continue;
		}
		bam_rmnet_drivers[n].probe = bam_rmnet_probe;
		bam_rmnet_drivers[n].remove = bam_rmnet_remove;
		tempname = kmalloc(BAM_DMUX_CH_NAME_MAX_LEN, GFP_KERNEL);
		if (tempname == NULL) {
			netdevs[n] = NULL;
			return -ENOMEM;
		}
		scnprintf(tempname, BAM_DMUX_CH_NAME_MAX_LEN, "bam_dmux_ch_%d",
			  n);
		bam_rmnet_drivers[n].driver.name = tempname;
		bam_rmnet_drivers[n].driver.owner = THIS_MODULE;
		platform_driver_register(&bam_rmnet_drivers[n]);
		n++;
	}

	return 0;
}

module_init(rmnet_init);
MODULE_DESCRIPTION("MSM RMNET BAM TRANSPORT");
MODULE_LICENSE("GPL v2");