/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * RMNET BAM Module.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/wakelock.h>
#include <linux/if_arp.h>
#include <linux/msm_rmnet.h>
#include <linux/platform_device.h>

#ifdef CONFIG_HAS_EARLYSUSPEND
#include <linux/earlysuspend.h>
#endif

#include <mach/bam_dmux.h>

/* Debug message support */
static int msm_rmnet_bam_debug_mask;
module_param_named(debug_enable, msm_rmnet_bam_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#define DEBUG_MASK_LVL0 (1U << 0)
#define DEBUG_MASK_LVL1 (1U << 1)
#define DEBUG_MASK_LVL2 (1U << 2)

#define DBG(m, x...) do {			   \
	if (msm_rmnet_bam_debug_mask & m)	   \
		pr_info(x);			   \
} while (0)
#define DBG0(x...) DBG(DEBUG_MASK_LVL0, x)
#define DBG1(x...) DBG(DEBUG_MASK_LVL1, x)
#define DBG2(x...) DBG(DEBUG_MASK_LVL2, x)

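/*
 * Runtime control: each DEBUG_MASK_LVL* bit gates one verbosity level.
 * Assuming this file builds as msm_rmnet_bam.ko (the module name is an
 * assumption, not stated in this file), all levels can be enabled with,
 * e.g.:  echo 7 > /sys/module/msm_rmnet_bam/parameters/debug_enable
 */
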
/* Configure device instances */
#define RMNET_DEVICE_COUNT (8)

/* allow larger frames */
#define RMNET_DATA_LEN 2000

#define DEVICE_ID_INVALID -1

#define DEVICE_INACTIVE 0
#define DEVICE_ACTIVE   1

#define HEADROOM_FOR_BAM 8 /* for mux header */
#define HEADROOM_FOR_QOS 8
#define TAILROOM         8 /* for padding by mux layer */

struct rmnet_private {
	struct net_device_stats stats;
	uint32_t ch_id;
#ifdef CONFIG_MSM_RMNET_DEBUG
	ktime_t last_packet;
	unsigned long wakeups_xmit;
	unsigned long wakeups_rcv;
	unsigned long timeout_us;
#endif
	struct sk_buff *skb;
	spinlock_t lock;
	struct tasklet_struct tsklt;
	u32 operation_mode; /* IOCTL specified mode (protocol, QoS header) */
	uint8_t device_up;
	uint8_t waiting_for_ul;
	uint8_t in_reset;
};

static uint8_t ul_is_connected;

#ifdef CONFIG_MSM_RMNET_DEBUG
static unsigned long timeout_us;

#ifdef CONFIG_HAS_EARLYSUSPEND
/*
 * If early suspend is enabled then we specify two timeout values,
 * one for when the screen is on (default) and one for when it is off.
 */
static unsigned long timeout_suspend_us;
static struct device *rmnet0;

/* Set timeout in us when the screen is off. */
static ssize_t timeout_suspend_store(struct device *d,
				     struct device_attribute *attr,
				     const char *buf, size_t n)
{
	timeout_suspend_us = simple_strtoul(buf, NULL, 10);
	return n;
}

static ssize_t timeout_suspend_show(struct device *d,
				    struct device_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%lu\n", (unsigned long) timeout_suspend_us);
}

static DEVICE_ATTR(timeout_suspend, 0664, timeout_suspend_show,
		   timeout_suspend_store);

static void rmnet_early_suspend(struct early_suspend *handler)
{
	if (rmnet0) {
		struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
		p->timeout_us = timeout_suspend_us;
	}
}

static void rmnet_late_resume(struct early_suspend *handler)
{
	if (rmnet0) {
		struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
		p->timeout_us = timeout_us;
	}
}

static struct early_suspend rmnet_power_suspend = {
	.suspend = rmnet_early_suspend,
	.resume = rmnet_late_resume,
};

static int __init rmnet_late_init(void)
{
	register_early_suspend(&rmnet_power_suspend);
	return 0;
}

late_initcall(rmnet_late_init);
#endif

/* Returns 1 if packet caused rmnet to wakeup, 0 otherwise. */
static int rmnet_cause_wakeup(struct rmnet_private *p)
{
	int ret = 0;
	ktime_t now;

	if (p->timeout_us == 0) /* Check if disabled */
		return 0;

	/* Use real (wall) time. */
	now = ktime_get_real();

	if (ktime_us_delta(now, p->last_packet) > p->timeout_us)
		ret = 1;

	p->last_packet = now;
	return ret;
}

static ssize_t wakeups_xmit_show(struct device *d,
				 struct device_attribute *attr,
				 char *buf)
{
	struct rmnet_private *p = netdev_priv(to_net_dev(d));
	return sprintf(buf, "%lu\n", p->wakeups_xmit);
}

DEVICE_ATTR(wakeups_xmit, 0444, wakeups_xmit_show, NULL);

static ssize_t wakeups_rcv_show(struct device *d, struct device_attribute *attr,
				char *buf)
{
	struct rmnet_private *p = netdev_priv(to_net_dev(d));
	return sprintf(buf, "%lu\n", p->wakeups_rcv);
}

DEVICE_ATTR(wakeups_rcv, 0444, wakeups_rcv_show, NULL);

/* Set timeout in us. */
static ssize_t timeout_store(struct device *d, struct device_attribute *attr,
			     const char *buf, size_t n)
{
#ifndef CONFIG_HAS_EARLYSUSPEND
	struct rmnet_private *p = netdev_priv(to_net_dev(d));
	p->timeout_us = timeout_us = simple_strtoul(buf, NULL, 10);
#else
/* If using early suspend/resume hooks do not write the value on store. */
	timeout_us = simple_strtoul(buf, NULL, 10);
#endif
	return n;
}

static ssize_t timeout_show(struct device *d, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%lu\n", timeout_us);
}

DEVICE_ATTR(timeout, 0664, timeout_show, timeout_store);
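
/*
 * The timeout/wakeups_* attributes are created against the net_device's
 * struct device in rmnet_init(), so they appear under
 * /sys/class/net/rmnetN/.  Illustrative use (value is in microseconds):
 *   echo 500000 > /sys/class/net/rmnet0/timeout
 */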
#endif


/* Forward declaration */
static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);

static __be16 rmnet_ip_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	__be16 protocol = 0;

	skb->dev = dev;

	/* Determine L3 protocol from the IP version nibble */
	switch (skb->data[0] & 0xf0) {
	case 0x40:
		protocol = htons(ETH_P_IP);
		break;
	case 0x60:
		protocol = htons(ETH_P_IPV6);
		break;
	default:
		pr_err("[%s] rmnet_recv() L3 protocol decode error: 0x%02x\n",
		       dev->name, skb->data[0] & 0xf0);
		/* skb will be dropped in upper layer for unknown protocol */
	}
	return protocol;
}

static int count_this_packet(void *_hdr, int len)
{
	struct ethhdr *hdr = _hdr;

	/* Do not count ARP frames against the data statistics */
	if (len >= ETH_HLEN && hdr->h_proto == htons(ETH_P_ARP))
		return 0;

	return 1;
}

/* Rx Callback, Called in Work Queue context */
static void bam_recv_notify(void *dev, struct sk_buff *skb)
{
	struct rmnet_private *p = netdev_priv(dev);
	unsigned long flags;
	u32 opmode;

	if (skb) {
		skb->dev = dev;
		/* Handle Rx frame format */
		spin_lock_irqsave(&p->lock, flags);
		opmode = p->operation_mode;
		spin_unlock_irqrestore(&p->lock, flags);

		if (RMNET_IS_MODE_IP(opmode)) {
			/* Driver in IP mode */
			skb->protocol = rmnet_ip_type_trans(skb, dev);
		} else {
			/* Driver in Ethernet mode */
			skb->protocol = eth_type_trans(skb, dev);
		}
		if (RMNET_IS_MODE_IP(opmode) ||
		    count_this_packet(skb->data, skb->len)) {
#ifdef CONFIG_MSM_RMNET_DEBUG
			p->wakeups_rcv += rmnet_cause_wakeup(p);
#endif
			p->stats.rx_packets++;
			p->stats.rx_bytes += skb->len;
		}
		DBG1("[%s] Rx packet #%lu len=%d\n",
		     ((struct net_device *)dev)->name,
		     p->stats.rx_packets, skb->len);

		/* Deliver to network stack */
		netif_rx(skb);
	} else
		pr_err("[%s] %s: No skb received\n",
		       ((struct net_device *)dev)->name, __func__);
}

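/*
 * In QoS mode, _rmnet_xmit() below prepends a struct QMI_QOS_HDR_S to each
 * frame; as used here it carries a version, flags, and the flow ID taken
 * from skb->mark.  The layout itself comes from <linux/msm_rmnet.h>, and
 * HEADROOM_FOR_QOS reserves the space consumed by this skb_push().
 */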
static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	int bam_ret;
	struct QMI_QOS_HDR_S *qmih;
	u32 opmode;
	unsigned long flags;

	/* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
	spin_lock_irqsave(&p->lock, flags);
	opmode = p->operation_mode;
	spin_unlock_irqrestore(&p->lock, flags);

	if (RMNET_IS_MODE_QOS(opmode)) {
		qmih = (struct QMI_QOS_HDR_S *)
			skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
		qmih->version = 1;
		qmih->flags = 0;
		qmih->flow_id = skb->mark;
	}

	dev->trans_start = jiffies;
	/* if write() succeeds, skb access is unsafe in this process */
	bam_ret = msm_bam_dmux_write(p->ch_id, skb);

	if (bam_ret != 0 && bam_ret != -EAGAIN && bam_ret != -EFAULT) {
		pr_err("[%s] %s: write returned error %d\n",
		       dev->name, __func__, bam_ret);
		return -EPERM;
	}

	return bam_ret;
}

static void bam_write_done(void *dev, struct sk_buff *skb)
{
	struct rmnet_private *p = netdev_priv(dev);
	u32 opmode = p->operation_mode;

	DBG1("%s: write complete\n", __func__);
	if (RMNET_IS_MODE_IP(opmode) ||
	    count_this_packet(skb->data, skb->len)) {
		p->stats.tx_packets++;
		p->stats.tx_bytes += skb->len;
#ifdef CONFIG_MSM_RMNET_DEBUG
		p->wakeups_xmit += rmnet_cause_wakeup(p);
#endif
	}
	DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n",
	     ((struct net_device *)(dev))->name, p->stats.tx_packets,
	     skb->len, skb->mark);
	dev_kfree_skb_any(skb);
	if (netif_queue_stopped(dev) &&
	    msm_bam_dmux_is_ch_low(p->ch_id)) {
		DBG0("%s: Low WM hit, waking queue=%p\n",
		     __func__, skb);
		netif_wake_queue(dev);
	}
}

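/*
 * Event callback registered with the BAM DMUX layer by msm_bam_dmux_open().
 * Dispatches receive and write-done completions and tracks uplink (UL)
 * power-collapse state; a queue stalled in rmnet_xmit() while waiting for
 * the UL channel is restarted here once BAM_DMUX_UL_CONNECTED arrives.
 */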
static void bam_notify(void *dev, int event, unsigned long data)
{
	struct rmnet_private *p = netdev_priv(dev);

	switch (event) {
	case BAM_DMUX_RECEIVE:
		bam_recv_notify(dev, (struct sk_buff *)(data));
		break;
	case BAM_DMUX_WRITE_DONE:
		bam_write_done(dev, (struct sk_buff *)(data));
		break;
	case BAM_DMUX_UL_CONNECTED:
		ul_is_connected = 1;
		if (p->waiting_for_ul) {
			netif_wake_queue(dev);
			p->waiting_for_ul = 0;
		}
		break;
	case BAM_DMUX_UL_DISCONNECTED:
		ul_is_connected = 0;
		break;
	}
}

static int __rmnet_open(struct net_device *dev)
{
	int r;
	struct rmnet_private *p = netdev_priv(dev);

	DBG0("[%s] __rmnet_open()\n", dev->name);

	if (!p->device_up) {
		r = msm_bam_dmux_open(p->ch_id, dev, bam_notify);

		if (r < 0) {
			DBG0("%s: ch=%d failed with rc %d\n",
			     __func__, p->ch_id, r);
			return -ENODEV;
		}
	}

	p->device_up = DEVICE_ACTIVE;
	return 0;
}

static int rmnet_open(struct net_device *dev)
{
	int rc = 0;

	DBG0("[%s] rmnet_open()\n", dev->name);

	rc = __rmnet_open(dev);

	if (rc == 0)
		netif_start_queue(dev);

	return rc;
}


static int __rmnet_close(struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	int rc = 0;

	if (p->device_up) {
		/* do not close the rmnet port once it is up; closing and
		 * reopening it causes the remote side to hang */
		p->device_up = DEVICE_INACTIVE;
		return rc;
	} else
		return -EBADF;
}


static int rmnet_stop(struct net_device *dev)
{
	DBG0("[%s] rmnet_stop()\n", dev->name);

	__rmnet_close(dev);
	netif_stop_queue(dev);

	return 0;
}

static int rmnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < 0 || new_mtu > RMNET_DATA_LEN)
		return -EINVAL;

	DBG0("[%s] MTU change: old=%d new=%d\n",
	     dev->name, dev->mtu, new_mtu);
	dev->mtu = new_mtu;

	return 0;
}

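/*
 * Transmit entry point.  Four flow-control cases are handled below: the UL
 * channel may be in power collapse (stop the queue and kick off a wakeup),
 * the write may fail outright (-EPERM: report BUSY so the stack retries),
 * SSR may have been detected early (-EFAULT: drop the skb, carrier off),
 * or the high watermark may be hit (-EAGAIN or msm_bam_dmux_is_ch_full():
 * stop the queue until bam_write_done() sees the low watermark).
 */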
446static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
447{
Jeff Hugobac7ea22011-10-24 10:58:48 -0600448 struct rmnet_private *p = netdev_priv(dev);
Jeff Hugo1dbacd72011-12-01 18:07:58 -0700449 int ret = 0;
Jeff Hugobac7ea22011-10-24 10:58:48 -0600450
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700451 if (netif_queue_stopped(dev)) {
452 pr_err("[%s]fatal: rmnet_xmit called when "
453 "netif_queue is stopped", dev->name);
454 return 0;
455 }
456
Jeff Hugobac7ea22011-10-24 10:58:48 -0600457 if (!ul_is_connected) {
Jeff Hugo523de142012-01-06 10:52:09 -0700458 netif_stop_queue(dev);
Jeff Hugobac7ea22011-10-24 10:58:48 -0600459 p->waiting_for_ul = 1;
460 msm_bam_dmux_kickoff_ul_wakeup();
461 return NETDEV_TX_BUSY;
462 }
Jeff Hugo1dbacd72011-12-01 18:07:58 -0700463 ret = _rmnet_xmit(skb, dev);
Jeff Hugo523de142012-01-06 10:52:09 -0700464 if (ret == -EPERM)
465 return NETDEV_TX_BUSY;
466
Jeff Hugo4838f412012-01-20 11:19:37 -0700467 /*
468 * detected SSR a bit early. shut some things down now, and leave
469 * the rest to the main ssr handling code when that happens later
470 */
471 if (ret == -EFAULT) {
472 netif_carrier_off(dev);
473 dev_kfree_skb_any(skb);
474 return 0;
475 }
476
Jeff Hugo1dbacd72011-12-01 18:07:58 -0700477 if (ret == -EAGAIN) {
Jeff Hugo523de142012-01-06 10:52:09 -0700478 /*
479 * This should not happen
480 * EAGAIN means we attempted to overflow the high watermark
481 * Clearly the queue is not stopped like it should be, so
482 * stop it and return BUSY to the TCP/IP framework. It will
483 * retry this packet with the queue is restarted which happens
484 * in the write_done callback when the low watermark is hit.
485 */
486 netif_stop_queue(dev);
Jeff Hugo1dbacd72011-12-01 18:07:58 -0700487 return NETDEV_TX_BUSY;
488 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700489
Karthikeyan Ramasubramanian7bf5ca82011-11-21 13:33:19 -0700490 if (msm_bam_dmux_is_ch_full(p->ch_id)) {
491 netif_stop_queue(dev);
492 DBG0("%s: High WM hit, stopping queue=%p\n", __func__, skb);
493 }
494
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700495 return 0;
496}
497
static struct net_device_stats *rmnet_get_stats(struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	return &p->stats;
}

static void rmnet_set_multicast_list(struct net_device *dev)
{
}

static void rmnet_tx_timeout(struct net_device *dev)
{
	pr_warning("[%s] rmnet_tx_timeout()\n", dev->name);
}

static const struct net_device_ops rmnet_ops_ether = {
	.ndo_open = rmnet_open,
	.ndo_stop = rmnet_stop,
	.ndo_start_xmit = rmnet_xmit,
	.ndo_get_stats = rmnet_get_stats,
	.ndo_set_multicast_list = rmnet_set_multicast_list,
	.ndo_tx_timeout = rmnet_tx_timeout,
	.ndo_do_ioctl = rmnet_ioctl,
	.ndo_change_mtu = rmnet_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

static const struct net_device_ops rmnet_ops_ip = {
	.ndo_open = rmnet_open,
	.ndo_stop = rmnet_stop,
	.ndo_start_xmit = rmnet_xmit,
	.ndo_get_stats = rmnet_get_stats,
	.ndo_set_multicast_list = rmnet_set_multicast_list,
	.ndo_tx_timeout = rmnet_tx_timeout,
	.ndo_do_ioctl = rmnet_ioctl,
	.ndo_change_mtu = rmnet_change_mtu,
	.ndo_set_mac_address = 0,
	.ndo_validate_addr = 0,
};

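/*
 * The private ioctls below are driven from userspace through a socket fd
 * and an ifreq naming the interface.  An illustrative sketch (hypothetical
 * caller, error handling omitted):
 *
 *	struct ifreq ifr = { };
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	strlcpy(ifr.ifr_name, "rmnet0", sizeof(ifr.ifr_name));
 *	ioctl(fd, RMNET_IOCTL_SET_LLP_IP, &ifr);      - raw IP framing
 *	ioctl(fd, RMNET_IOCTL_SET_QOS_ENABLE, &ifr);  - enable QMI QoS headers
 */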
static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct rmnet_private *p = netdev_priv(dev);
	u32 old_opmode = p->operation_mode;
	unsigned long flags;
	int prev_mtu = dev->mtu;
	int rc = 0;

	/* Process IOCTL command */
	switch (cmd) {
	case RMNET_IOCTL_SET_LLP_ETHERNET:	/* Set Ethernet protocol */
		/* Perform Ethernet config only if in IP mode currently */
		if (p->operation_mode & RMNET_MODE_LLP_IP) {
			ether_setup(dev);
			random_ether_addr(dev->dev_addr);
			dev->mtu = prev_mtu;

			dev->netdev_ops = &rmnet_ops_ether;
			spin_lock_irqsave(&p->lock, flags);
			p->operation_mode &= ~RMNET_MODE_LLP_IP;
			p->operation_mode |= RMNET_MODE_LLP_ETH;
			spin_unlock_irqrestore(&p->lock, flags);
			DBG0("[%s] rmnet_ioctl(): "
			     "set Ethernet protocol mode\n",
			     dev->name);
		}
		break;

	case RMNET_IOCTL_SET_LLP_IP:		/* Set RAWIP protocol */
		/* Perform IP config only if in Ethernet mode currently */
		if (p->operation_mode & RMNET_MODE_LLP_ETH) {

			/* Undo config done in ether_setup() */
			dev->header_ops = 0;  /* No header */
			dev->type = ARPHRD_RAWIP;
			dev->hard_header_len = 0;
			dev->mtu = prev_mtu;
			dev->addr_len = 0;
			dev->flags &= ~(IFF_BROADCAST |
					IFF_MULTICAST);

			dev->needed_headroom = HEADROOM_FOR_BAM +
					       HEADROOM_FOR_QOS;
			dev->needed_tailroom = TAILROOM;
			dev->netdev_ops = &rmnet_ops_ip;
			spin_lock_irqsave(&p->lock, flags);
			p->operation_mode &= ~RMNET_MODE_LLP_ETH;
			p->operation_mode |= RMNET_MODE_LLP_IP;
			spin_unlock_irqrestore(&p->lock, flags);
			DBG0("[%s] rmnet_ioctl(): "
			     "set IP protocol mode\n",
			     dev->name);
		}
		break;

	case RMNET_IOCTL_GET_LLP:		/* Get link protocol state */
		ifr->ifr_ifru.ifru_data =
			(void *)(p->operation_mode &
				 (RMNET_MODE_LLP_ETH | RMNET_MODE_LLP_IP));
		break;

	case RMNET_IOCTL_SET_QOS_ENABLE:	/* Set QoS header enabled */
		spin_lock_irqsave(&p->lock, flags);
		p->operation_mode |= RMNET_MODE_QOS;
		spin_unlock_irqrestore(&p->lock, flags);
		DBG0("[%s] rmnet_ioctl(): set QMI QOS header enable\n",
		     dev->name);
		break;

	case RMNET_IOCTL_SET_QOS_DISABLE:	/* Set QoS header disabled */
		spin_lock_irqsave(&p->lock, flags);
		p->operation_mode &= ~RMNET_MODE_QOS;
		spin_unlock_irqrestore(&p->lock, flags);
		DBG0("[%s] rmnet_ioctl(): set QMI QOS header disable\n",
		     dev->name);
		break;

	case RMNET_IOCTL_GET_QOS:		/* Get QoS header state */
		ifr->ifr_ifru.ifru_data =
			(void *)(p->operation_mode & RMNET_MODE_QOS);
		break;

	case RMNET_IOCTL_GET_OPMODE:		/* Get operation mode */
		ifr->ifr_ifru.ifru_data = (void *)p->operation_mode;
		break;

	case RMNET_IOCTL_OPEN:			/* Open transport port */
		rc = __rmnet_open(dev);
		DBG0("[%s] rmnet_ioctl(): open transport port\n",
		     dev->name);
		break;

	case RMNET_IOCTL_CLOSE:			/* Close transport port */
		rc = __rmnet_close(dev);
		DBG0("[%s] rmnet_ioctl(): close transport port\n",
		     dev->name);
		break;

	default:
		pr_err("[%s] error: rmnet_ioctl called for unsupported cmd[%d]\n",
		       dev->name, cmd);
		return -EINVAL;
	}

	DBG2("[%s] %s: cmd=0x%x opmode old=0x%08x new=0x%08x\n",
	     dev->name, __func__, cmd, old_opmode, p->operation_mode);
	return rc;
}

static void __init rmnet_setup(struct net_device *dev)
{
	/* Using Ethernet mode by default */
	dev->netdev_ops = &rmnet_ops_ether;
	ether_setup(dev);

	/* set this after calling ether_setup */
	dev->mtu = RMNET_DATA_LEN;
	dev->needed_headroom = HEADROOM_FOR_BAM + HEADROOM_FOR_QOS;
	dev->needed_tailroom = TAILROOM;
	random_ether_addr(dev->dev_addr);

	dev->watchdog_timeo = 1000; /* 10 seconds? */
}

static struct net_device *netdevs[RMNET_DEVICE_COUNT];
static struct platform_driver bam_rmnet_drivers[RMNET_DEVICE_COUNT];

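/*
 * Per-channel platform probe.  The BAM DMUX layer registers a platform
 * device named "bam_dmux_ch_%d" for each channel when the modem transport
 * comes up; after a subsystem restart (SSR) this reopens the channel and
 * restarts the interface that bam_rmnet_remove() shut down.
 */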
static int bam_rmnet_probe(struct platform_device *pdev)
{
	int i;
	char name[BAM_DMUX_CH_NAME_MAX_LEN];
	struct rmnet_private *p;

	for (i = 0; i < RMNET_DEVICE_COUNT; ++i) {
		scnprintf(name, BAM_DMUX_CH_NAME_MAX_LEN, "bam_dmux_ch_%d", i);
		if (!strncmp(pdev->name, name, BAM_DMUX_CH_NAME_MAX_LEN))
			break;
	}

	/* defensive: pdev->name should always match one registered channel */
	if (i >= RMNET_DEVICE_COUNT)
		return -ENODEV;

	p = netdev_priv(netdevs[i]);
	if (p->in_reset) {
		p->in_reset = 0;
		msm_bam_dmux_open(p->ch_id, netdevs[i], bam_notify);
		netif_carrier_on(netdevs[i]);
		netif_start_queue(netdevs[i]);
	}

	return 0;
}

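/*
 * Per-channel platform remove, invoked when BAM DMUX tears the channel
 * down (typically at the start of SSR).  Mark the channel as in reset and
 * stop traffic; bam_rmnet_probe() undoes this once the modem returns.
 */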
static int bam_rmnet_remove(struct platform_device *pdev)
{
	int i;
	char name[BAM_DMUX_CH_NAME_MAX_LEN];
	struct rmnet_private *p;

	for (i = 0; i < RMNET_DEVICE_COUNT; ++i) {
		scnprintf(name, BAM_DMUX_CH_NAME_MAX_LEN, "bam_dmux_ch_%d", i);
		if (!strncmp(pdev->name, name, BAM_DMUX_CH_NAME_MAX_LEN))
			break;
	}

	/* defensive: pdev->name should always match one registered channel */
	if (i >= RMNET_DEVICE_COUNT)
		return -ENODEV;

	p = netdev_priv(netdevs[i]);
	p->in_reset = 1;
	p->waiting_for_ul = 0;
	msm_bam_dmux_close(p->ch_id);
	netif_carrier_off(netdevs[i]);
	netif_stop_queue(netdevs[i]);
	return 0;
}

static int __init rmnet_init(void)
{
	int ret;
	struct device *d;
	struct net_device *dev;
	struct rmnet_private *p;
	unsigned n;
	char *tempname;

	pr_info("%s: BAM devices[%d]\n", __func__, RMNET_DEVICE_COUNT);

#ifdef CONFIG_MSM_RMNET_DEBUG
	timeout_us = 0;
#ifdef CONFIG_HAS_EARLYSUSPEND
	timeout_suspend_us = 0;
#endif
#endif

	for (n = 0; n < RMNET_DEVICE_COUNT; n++) {
		dev = alloc_netdev(sizeof(struct rmnet_private),
				   "rmnet%d", rmnet_setup);

		if (!dev) {
			pr_err("%s: no memory for netdev %d\n", __func__, n);
			return -ENOMEM;
		}

		netdevs[n] = dev;
		d = &(dev->dev);
		p = netdev_priv(dev);
		/* Initial config uses Ethernet */
		p->operation_mode = RMNET_MODE_LLP_ETH;
		p->ch_id = n;
		p->waiting_for_ul = 0;
		p->in_reset = 0;
		spin_lock_init(&p->lock);
#ifdef CONFIG_MSM_RMNET_DEBUG
		p->timeout_us = timeout_us;
		p->wakeups_xmit = p->wakeups_rcv = 0;
#endif

		ret = register_netdev(dev);
		if (ret) {
			pr_err("%s: unable to register netdev"
			       " %d rc=%d\n", __func__, n, ret);
			free_netdev(dev);
			return ret;
		}

#ifdef CONFIG_MSM_RMNET_DEBUG
		if (device_create_file(d, &dev_attr_timeout))
			continue;
		if (device_create_file(d, &dev_attr_wakeups_xmit))
			continue;
		if (device_create_file(d, &dev_attr_wakeups_rcv))
			continue;
#ifdef CONFIG_HAS_EARLYSUSPEND
		if (device_create_file(d, &dev_attr_timeout_suspend))
			continue;

		/* Only care about rmnet0 for suspend/resume timeout hooks. */
		if (n == 0)
			rmnet0 = d;
#endif
#endif
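
		/*
		 * Register a dedicated platform driver per channel.  Its name
		 * must match the "bam_dmux_ch_%d" platform device created by
		 * the BAM DMUX layer, so probe/remove double as per-channel
		 * SSR notifications (see bam_rmnet_probe/bam_rmnet_remove).
		 */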
		bam_rmnet_drivers[n].probe = bam_rmnet_probe;
		bam_rmnet_drivers[n].remove = bam_rmnet_remove;
		tempname = kmalloc(BAM_DMUX_CH_NAME_MAX_LEN, GFP_KERNEL);
		if (tempname == NULL)
			return -ENOMEM;
		scnprintf(tempname, BAM_DMUX_CH_NAME_MAX_LEN, "bam_dmux_ch_%d",
			  n);
		bam_rmnet_drivers[n].driver.name = tempname;
		bam_rmnet_drivers[n].driver.owner = THIS_MODULE;
		ret = platform_driver_register(&bam_rmnet_drivers[n]);
		if (ret) {
			pr_err("%s: registration failed n=%d rc=%d\n",
			       __func__, n, ret);
			return ret;
		}
	}
	return 0;
}

module_init(rmnet_init);
MODULE_DESCRIPTION("MSM RMNET BAM TRANSPORT");
MODULE_LICENSE("GPL v2");