blob: 6fa0ed64c4f80c8ac1c3df00206afc1a6a600151 [file] [log] [blame]
/* linux/drivers/net/msm_rmnet.c
 *
 * Virtual Ethernet Interface for MSM7K Networking
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 * Author: Brian Swetland <swetland@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
19
20#include <linux/module.h>
21#include <linux/kernel.h>
22#include <linux/string.h>
23#include <linux/delay.h>
24#include <linux/errno.h>
25#include <linux/interrupt.h>
26#include <linux/init.h>
27#include <linux/netdevice.h>
28#include <linux/etherdevice.h>
29#include <linux/skbuff.h>
30#include <linux/wakelock.h>
31#include <linux/platform_device.h>
32#include <linux/if_arp.h>
33#include <linux/msm_rmnet.h>
34
35#ifdef CONFIG_HAS_EARLYSUSPEND
36#include <linux/earlysuspend.h>
37#endif
38
39#include <mach/msm_smd.h>
40#include <mach/peripheral-loader.h>
41
/* Debug message support */
/* Bitmask of enabled debug levels; writable at runtime via module param. */
static int msm_rmnet_debug_mask;
module_param_named(debug_enable, msm_rmnet_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#define DEBUG_MASK_LVL0 (1U << 0)
#define DEBUG_MASK_LVL1 (1U << 1)
#define DEBUG_MASK_LVL2 (1U << 2)

/* Emit pr_info() only when level bit m is set in the debug mask. */
#define DBG(m, x...) do {		   \
		if (msm_rmnet_debug_mask & m) \
			pr_info(x); \
} while (0)
#define DBG0(x...) DBG(DEBUG_MASK_LVL0, x)
#define DBG1(x...) DBG(DEBUG_MASK_LVL1, x)
#define DBG2(x...) DBG(DEBUG_MASK_LVL2, x)
58
/* Configure device instances */
#define RMNET_DEVICE_COUNT (8)
/* SMD data channel name backing each rmnet%d net device, in order. */
static const char *ch_name[RMNET_DEVICE_COUNT] = {
	"DATA5",
	"DATA6",
	"DATA7",
	"DATA8",
	"DATA9",
	"DATA12",
	"DATA13",
	"DATA14",
};

/* XXX should come from smd headers */
#define SMD_PORT_ETHER0 11

/* allow larger frames */
#define RMNET_DATA_LEN 2000

/* Headroom reserved so a QMI QoS header can be pushed in QoS mode. */
#define HEADROOM_FOR_QOS 8

/* Per-channel completion, signalled when the SMD platform device probes. */
static struct completion *port_complete[RMNET_DEVICE_COUNT];
81
/* Per-netdev private state for one rmnet/SMD channel pair. */
struct rmnet_private
{
	smd_channel_t *ch;		/* SMD channel handle; 0 when closed */
	struct net_device_stats stats;
	const char *chname;		/* SMD channel name from ch_name[] */
	struct wake_lock wake_lock;	/* held briefly while draining rx data */
#ifdef CONFIG_MSM_RMNET_DEBUG
	ktime_t last_packet;		/* wall-clock time of previous packet */
	unsigned long wakeups_xmit;	/* tx packets arriving after an idle gap */
	unsigned long wakeups_rcv;	/* rx packets arriving after an idle gap */
	unsigned long timeout_us;	/* idle gap that counts as a wakeup; 0=off */
#endif
	struct sk_buff *skb;		/* tx skb parked while SMD FIFO is full */
	spinlock_t lock;		/* protects skb and operation_mode */
	struct tasklet_struct tsklt;	/* retries a parked transmit */
	u32 operation_mode; /* IOCTL specified mode (protocol, QoS header) */
	struct platform_driver pdrv;	/* probe driver matching this channel */
	struct completion complete;	/* completed by msm_rmnet_smd_probe() */
	void *pil;			/* modem peripheral-image handle */
	struct mutex pil_lock;		/* serializes modem image loading */
};

/* Seconds to wait for the modem's SMD port after pil_get() (0 = no wait). */
static uint msm_rmnet_modem_wait;
module_param_named(modem_wait, msm_rmnet_modem_wait,
		   uint, S_IRUGO | S_IWUSR | S_IWGRP);

/* Forward declaration */
static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
110
/* Decide whether a frame contributes to interface statistics.
 * ARP frames are excluded; everything else (including short frames
 * that cannot carry an Ethernet header) is counted.
 */
static int count_this_packet(void *_hdr, int len)
{
	const struct ethhdr *eth = _hdr;

	return !(len >= ETH_HLEN && eth->h_proto == htons(ETH_P_ARP));
}
120
#ifdef CONFIG_MSM_RMNET_DEBUG
/* Screen-on idle timeout (us) used to seed each new device's timeout_us. */
static unsigned long timeout_us;

#ifdef CONFIG_HAS_EARLYSUSPEND
/*
 * If early suspend is enabled then we specify two timeout values,
 * screen on (default), and screen is off.
 */
static unsigned long timeout_suspend_us;
/* The rmnet0 device; only it is retargeted on suspend/resume. */
static struct device *rmnet0;

/* Set timeout in us when the screen is off. */
static ssize_t timeout_suspend_store(struct device *d, struct device_attribute *attr, const char *buf, size_t n)
{
	timeout_suspend_us = simple_strtoul(buf, NULL, 10);
	return n;
}

/* Show the screen-off timeout in us. */
static ssize_t timeout_suspend_show(struct device *d,
				    struct device_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%lu\n", (unsigned long) timeout_suspend_us);
}

static DEVICE_ATTR(timeout_suspend, 0664, timeout_suspend_show, timeout_suspend_store);

/* On early suspend, switch rmnet0 to the screen-off timeout. */
static void rmnet_early_suspend(struct early_suspend *handler) {
	if (rmnet0) {
		struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
		p->timeout_us = timeout_suspend_us;
	}
}

/* On late resume, restore rmnet0 to the screen-on timeout. */
static void rmnet_late_resume(struct early_suspend *handler) {
	if (rmnet0) {
		struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
		p->timeout_us = timeout_us;
	}
}

static struct early_suspend rmnet_power_suspend = {
	.suspend = rmnet_early_suspend,
	.resume = rmnet_late_resume,
};

static int __init rmnet_late_init(void)
{
	register_early_suspend(&rmnet_power_suspend);
	return 0;
}

late_initcall(rmnet_late_init);
#endif
175
/* Returns 1 if packet caused rmnet to wakeup, 0 otherwise. */
static int rmnet_cause_wakeup(struct rmnet_private *p) {
	int ret = 0;
	ktime_t now;
	if (p->timeout_us == 0) /* Check if disabled */
		return 0;

	/* Use real (wall) time. */
	now = ktime_get_real();

	/* A gap longer than timeout_us since the previous packet
	 * means this packet "woke" the link. */
	if (ktime_us_delta(now, p->last_packet) > p->timeout_us) {
		ret = 1;
	}
	p->last_packet = now;
	return ret;
}

/* sysfs: count of tx packets that followed an idle gap. */
static ssize_t wakeups_xmit_show(struct device *d,
				 struct device_attribute *attr,
				 char *buf)
{
	struct rmnet_private *p = netdev_priv(to_net_dev(d));
	return sprintf(buf, "%lu\n", p->wakeups_xmit);
}

DEVICE_ATTR(wakeups_xmit, 0444, wakeups_xmit_show, NULL);

/* sysfs: count of rx packets that followed an idle gap. */
static ssize_t wakeups_rcv_show(struct device *d, struct device_attribute *attr,
				char *buf)
{
	struct rmnet_private *p = netdev_priv(to_net_dev(d));
	return sprintf(buf, "%lu\n", p->wakeups_rcv);
}

DEVICE_ATTR(wakeups_rcv, 0444, wakeups_rcv_show, NULL);

/* Set timeout in us. */
static ssize_t timeout_store(struct device *d, struct device_attribute *attr,
			     const char *buf, size_t n)
{
#ifndef CONFIG_HAS_EARLYSUSPEND
	struct rmnet_private *p = netdev_priv(to_net_dev(d));
	p->timeout_us = timeout_us = simple_strtoul(buf, NULL, 10);
#else
/* If using early suspend/resume hooks do not write the value on store. */
	timeout_us = simple_strtoul(buf, NULL, 10);
#endif
	return n;
}
226static ssize_t timeout_show(struct device *d, struct device_attribute *attr,
227 char *buf)
228{
229 struct rmnet_private *p = netdev_priv(to_net_dev(d));
230 p = netdev_priv(to_net_dev(d));
231 return sprintf(buf, "%lu\n", timeout_us);
232}
233
234DEVICE_ATTR(timeout, 0664, timeout_show, timeout_store);
235#endif
236
237static __be16 rmnet_ip_type_trans(struct sk_buff *skb, struct net_device *dev)
238{
239 __be16 protocol = 0;
240
241 skb->dev = dev;
242
243 /* Determine L3 protocol */
244 switch (skb->data[0] & 0xf0) {
245 case 0x40:
246 protocol = htons(ETH_P_IP);
247 break;
248 case 0x60:
249 protocol = htons(ETH_P_IPV6);
250 break;
251 default:
252 pr_err("[%s] rmnet_recv() L3 protocol decode error: 0x%02x",
253 dev->name, skb->data[0] & 0xf0);
254 /* skb will be dropped in uppder layer for unknown protocol */
255 }
256 return protocol;
257}
258
Jeff Hugod2e2c492011-07-27 14:15:06 -0600259static void smd_net_data_handler(unsigned long arg);
260static DECLARE_TASKLET(smd_net_data_tasklet, smd_net_data_handler, 0);
261
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700262/* Called in soft-irq context */
263static void smd_net_data_handler(unsigned long arg)
264{
265 struct net_device *dev = (struct net_device *) arg;
266 struct rmnet_private *p = netdev_priv(dev);
267 struct sk_buff *skb;
268 void *ptr = 0;
269 int sz;
270 u32 opmode = p->operation_mode;
271 unsigned long flags;
272
273 for (;;) {
274 sz = smd_cur_packet_size(p->ch);
275 if (sz == 0) break;
276 if (smd_read_avail(p->ch) < sz) break;
277
Abhijeet Dharmapurikar5eeca522011-07-15 17:05:36 -0600278 skb = dev_alloc_skb(sz + NET_IP_ALIGN);
279 if (skb == NULL) {
280 pr_err("[%s] rmnet_recv() cannot allocate skb\n",
281 dev->name);
Jeff Hugod2e2c492011-07-27 14:15:06 -0600282 /* out of memory, reschedule a later attempt */
283 smd_net_data_tasklet.data = (unsigned long)dev;
284 tasklet_schedule(&smd_net_data_tasklet);
285 break;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700286 } else {
Abhijeet Dharmapurikar5eeca522011-07-15 17:05:36 -0600287 skb->dev = dev;
288 skb_reserve(skb, NET_IP_ALIGN);
289 ptr = skb_put(skb, sz);
290 wake_lock_timeout(&p->wake_lock, HZ / 2);
291 if (smd_read(p->ch, ptr, sz) != sz) {
292 pr_err("[%s] rmnet_recv() smd lied about avail?!",
293 dev->name);
294 ptr = 0;
295 dev_kfree_skb_irq(skb);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700296 } else {
Abhijeet Dharmapurikar5eeca522011-07-15 17:05:36 -0600297 /* Handle Rx frame format */
298 spin_lock_irqsave(&p->lock, flags);
299 opmode = p->operation_mode;
300 spin_unlock_irqrestore(&p->lock, flags);
301
302 if (RMNET_IS_MODE_IP(opmode)) {
303 /* Driver in IP mode */
304 skb->protocol =
305 rmnet_ip_type_trans(skb, dev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700306 } else {
Abhijeet Dharmapurikar5eeca522011-07-15 17:05:36 -0600307 /* Driver in Ethernet mode */
308 skb->protocol =
309 eth_type_trans(skb, dev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700310 }
Abhijeet Dharmapurikar5eeca522011-07-15 17:05:36 -0600311 if (RMNET_IS_MODE_IP(opmode) ||
312 count_this_packet(ptr, skb->len)) {
313#ifdef CONFIG_MSM_RMNET_DEBUG
314 p->wakeups_rcv +=
315 rmnet_cause_wakeup(p);
316#endif
317 p->stats.rx_packets++;
318 p->stats.rx_bytes += skb->len;
319 }
320 DBG1("[%s] Rx packet #%lu len=%d\n",
321 dev->name, p->stats.rx_packets,
322 skb->len);
323
324 /* Deliver to network stack */
325 netif_rx(skb);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700326 }
Abhijeet Dharmapurikar5eeca522011-07-15 17:05:36 -0600327 continue;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700328 }
329 if (smd_read(p->ch, ptr, sz) != sz)
330 pr_err("[%s] rmnet_recv() smd lied about avail?!",
331 dev->name);
332 }
333}
334
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700335static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
336{
337 struct rmnet_private *p = netdev_priv(dev);
338 smd_channel_t *ch = p->ch;
339 int smd_ret;
340 struct QMI_QOS_HDR_S *qmih;
341 u32 opmode;
342 unsigned long flags;
343
344 /* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
345 spin_lock_irqsave(&p->lock, flags);
346 opmode = p->operation_mode;
347 spin_unlock_irqrestore(&p->lock, flags);
348
349 if (RMNET_IS_MODE_QOS(opmode)) {
350 qmih = (struct QMI_QOS_HDR_S *)
351 skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
352 qmih->version = 1;
353 qmih->flags = 0;
354 qmih->flow_id = skb->mark;
355 }
356
357 dev->trans_start = jiffies;
358 smd_ret = smd_write(ch, skb->data, skb->len);
359 if (smd_ret != skb->len) {
360 pr_err("[%s] %s: smd_write returned error %d",
361 dev->name, __func__, smd_ret);
362 p->stats.tx_errors++;
363 goto xmit_out;
364 }
365
366 if (RMNET_IS_MODE_IP(opmode) ||
367 count_this_packet(skb->data, skb->len)) {
368 p->stats.tx_packets++;
369 p->stats.tx_bytes += skb->len;
370#ifdef CONFIG_MSM_RMNET_DEBUG
371 p->wakeups_xmit += rmnet_cause_wakeup(p);
372#endif
373 }
374 DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n",
375 dev->name, p->stats.tx_packets, skb->len, skb->mark);
376
377xmit_out:
378 /* data xmited, safe to release skb */
379 dev_kfree_skb_irq(skb);
380 return 0;
381}
382
383static void _rmnet_resume_flow(unsigned long param)
384{
385 struct net_device *dev = (struct net_device *)param;
386 struct rmnet_private *p = netdev_priv(dev);
387 struct sk_buff *skb = NULL;
388 unsigned long flags;
389
390 /* xmit and enable the flow only once even if
391 multiple tasklets were scheduled by smd_net_notify */
392 spin_lock_irqsave(&p->lock, flags);
393 if (p->skb && (smd_write_avail(p->ch) >= p->skb->len)) {
394 skb = p->skb;
395 p->skb = NULL;
396 spin_unlock_irqrestore(&p->lock, flags);
397 _rmnet_xmit(skb, dev);
398 netif_wake_queue(dev);
399 } else
400 spin_unlock_irqrestore(&p->lock, flags);
401}
402
/* Release the modem peripheral-image reference, if one is held. */
static void msm_rmnet_unload_modem(void *pil)
{
	if (!pil)
		return;
	pil_put(pil);
}
408
409static void *msm_rmnet_load_modem(struct net_device *dev)
410{
411 void *pil;
412 int rc;
413 struct rmnet_private *p = netdev_priv(dev);
414
415 pil = pil_get("modem");
416 if (IS_ERR(pil))
417 pr_err("[%s] %s: modem load failed\n",
418 dev->name, __func__);
419 else if (msm_rmnet_modem_wait) {
420 rc = wait_for_completion_interruptible_timeout(
421 &p->complete,
422 msecs_to_jiffies(msm_rmnet_modem_wait * 1000));
423 if (!rc)
424 rc = -ETIMEDOUT;
425 if (rc < 0) {
426 pr_err("[%s] %s: wait for rmnet port failed %d\n",
427 dev->name, __func__, rc);
428 msm_rmnet_unload_modem(pil);
429 pil = ERR_PTR(rc);
430 }
431 }
432
433 return pil;
434}
435
/* SMD event callback (atomic context): schedules the rx drain and the
 * parked-tx resume tasklets, and tracks carrier state. */
static void smd_net_notify(void *_dev, unsigned event)
{
	struct rmnet_private *p = netdev_priv((struct net_device *)_dev);

	switch (event) {
	case SMD_EVENT_DATA:
		/* If a tx skb is parked and the FIFO now has room,
		 * let _rmnet_resume_flow() retry it. */
		spin_lock(&p->lock);
		if (p->skb && (smd_write_avail(p->ch) >= p->skb->len)) {
			smd_disable_read_intr(p->ch);
			tasklet_hi_schedule(&p->tsklt);
		}

		spin_unlock(&p->lock);

		/* Drain rx once at least one full packet is readable. */
		if (smd_read_avail(p->ch) &&
		    (smd_read_avail(p->ch) >= smd_cur_packet_size(p->ch))) {
			smd_net_data_tasklet.data = (unsigned long) _dev;
			tasklet_schedule(&smd_net_data_tasklet);
		}
		break;

	case SMD_EVENT_OPEN:
		DBG0("%s: opening SMD port\n", __func__);
		netif_carrier_on(_dev);
		if (netif_queue_stopped(_dev)) {
			DBG0("%s: re-starting if queue\n", __func__);
			netif_wake_queue(_dev);
		}
		break;

	case SMD_EVENT_CLOSE:
		DBG0("%s: closing SMD port\n", __func__);
		netif_carrier_off(_dev);
		break;
	}
}
472
473static int __rmnet_open(struct net_device *dev)
474{
475 int r;
476 void *pil;
477 struct rmnet_private *p = netdev_priv(dev);
478
479 mutex_lock(&p->pil_lock);
480 if (!p->pil) {
481 pil = msm_rmnet_load_modem(dev);
482 if (IS_ERR(pil)) {
483 mutex_unlock(&p->pil_lock);
484 return PTR_ERR(pil);
485 }
486 p->pil = pil;
487 }
488 mutex_unlock(&p->pil_lock);
489
490 if (!p->ch) {
491 r = smd_open(p->chname, &p->ch, dev, smd_net_notify);
492
493 if (r < 0)
494 return -ENODEV;
495 }
496
497 smd_disable_read_intr(p->ch);
498 return 0;
499}
500
501static int __rmnet_close(struct net_device *dev)
502{
503 struct rmnet_private *p = netdev_priv(dev);
504 int rc;
505 unsigned long flags;
506
507 if (p->ch) {
508 rc = smd_close(p->ch);
509 spin_lock_irqsave(&p->lock, flags);
510 p->ch = 0;
511 spin_unlock_irqrestore(&p->lock, flags);
512 return rc;
513 } else
514 return -EBADF;
515}
516
517static int rmnet_open(struct net_device *dev)
518{
519 int rc = 0;
520
521 DBG0("[%s] rmnet_open()\n", dev->name);
522
523 rc = __rmnet_open(dev);
524 if (rc == 0)
525 netif_start_queue(dev);
526
527 return rc;
528}
529
/* ndo_stop: stop the tx queue and kill the resume-flow tasklet.
 * The SMD channel and modem image are intentionally left loaded. */
static int rmnet_stop(struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);

	DBG0("[%s] rmnet_stop()\n", dev->name);

	netif_stop_queue(dev);
	tasklet_kill(&p->tsklt);

	/* TODO: unload modem safely,
	   currently, this causes unnecessary unloads */
	/*
	mutex_lock(&p->pil_lock);
	msm_rmnet_unload_modem(p->pil);
	p->pil = NULL;
	mutex_unlock(&p->pil_lock);
	*/

	return 0;
}
550
551static int rmnet_change_mtu(struct net_device *dev, int new_mtu)
552{
553 if (0 > new_mtu || RMNET_DATA_LEN < new_mtu)
554 return -EINVAL;
555
556 DBG0("[%s] MTU change: old=%d new=%d\n",
557 dev->name, dev->mtu, new_mtu);
558 dev->mtu = new_mtu;
559
560 return 0;
561}
562
/* ndo_start_xmit: send via SMD, or park the skb and stop the queue when the
 * FIFO lacks space; smd_net_notify() resumes the flow later. */
static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	smd_channel_t *ch = p->ch;
	unsigned long flags;

	if (netif_queue_stopped(dev)) {
		pr_err("[%s] fatal: rmnet_xmit called when netif_queue is stopped",
		       dev->name);
		return 0;
	}

	spin_lock_irqsave(&p->lock, flags);
	/* Enable SMD interrupts while deciding, so a freed-space event
	 * cannot be missed between the check and parking the skb. */
	smd_enable_read_intr(ch);
	if (smd_write_avail(ch) < skb->len) {
		netif_stop_queue(dev);
		p->skb = skb;	/* parked; _rmnet_resume_flow() will send it */
		spin_unlock_irqrestore(&p->lock, flags);
		return 0;
	}
	smd_disable_read_intr(ch);
	spin_unlock_irqrestore(&p->lock, flags);

	_rmnet_xmit(skb, dev);

	return 0;
}
590
591static struct net_device_stats *rmnet_get_stats(struct net_device *dev)
592{
593 struct rmnet_private *p = netdev_priv(dev);
594 return &p->stats;
595}
596
/* ndo_set_multicast_list: intentionally a no-op for this transport. */
static void rmnet_set_multicast_list(struct net_device *dev)
{
}

/* ndo_tx_timeout: log only; no recovery action is taken here. */
static void rmnet_tx_timeout(struct net_device *dev)
{
	pr_warning("[%s] rmnet_tx_timeout()\n", dev->name);
}
605
606
/* netdev ops used while in Ethernet link-protocol mode. */
static const struct net_device_ops rmnet_ops_ether = {
	.ndo_open = rmnet_open,
	.ndo_stop = rmnet_stop,
	.ndo_start_xmit = rmnet_xmit,
	.ndo_get_stats = rmnet_get_stats,
	.ndo_set_multicast_list = rmnet_set_multicast_list,
	.ndo_tx_timeout = rmnet_tx_timeout,
	.ndo_do_ioctl = rmnet_ioctl,
	.ndo_change_mtu = rmnet_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

/* netdev ops used while in raw-IP mode: no MAC address handling. */
static const struct net_device_ops rmnet_ops_ip = {
	.ndo_open = rmnet_open,
	.ndo_stop = rmnet_stop,
	.ndo_start_xmit = rmnet_xmit,
	.ndo_get_stats = rmnet_get_stats,
	.ndo_set_multicast_list = rmnet_set_multicast_list,
	.ndo_tx_timeout = rmnet_tx_timeout,
	.ndo_do_ioctl = rmnet_ioctl,
	.ndo_change_mtu = rmnet_change_mtu,
	.ndo_set_mac_address = 0,
	.ndo_validate_addr = 0,
};
632
633static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
634{
635 struct rmnet_private *p = netdev_priv(dev);
636 u32 old_opmode = p->operation_mode;
637 unsigned long flags;
638 int prev_mtu = dev->mtu;
639 int rc = 0;
640
641 /* Process IOCTL command */
642 switch (cmd) {
643 case RMNET_IOCTL_SET_LLP_ETHERNET: /* Set Ethernet protocol */
644 /* Perform Ethernet config only if in IP mode currently*/
645 if (p->operation_mode & RMNET_MODE_LLP_IP) {
646 ether_setup(dev);
647 random_ether_addr(dev->dev_addr);
648 dev->mtu = prev_mtu;
649
650 dev->netdev_ops = &rmnet_ops_ether;
651 spin_lock_irqsave(&p->lock, flags);
652 p->operation_mode &= ~RMNET_MODE_LLP_IP;
653 p->operation_mode |= RMNET_MODE_LLP_ETH;
654 spin_unlock_irqrestore(&p->lock, flags);
655 DBG0("[%s] rmnet_ioctl(): "
656 "set Ethernet protocol mode\n",
657 dev->name);
658 }
659 break;
660
661 case RMNET_IOCTL_SET_LLP_IP: /* Set RAWIP protocol */
662 /* Perform IP config only if in Ethernet mode currently*/
663 if (p->operation_mode & RMNET_MODE_LLP_ETH) {
664
665 /* Undo config done in ether_setup() */
666 dev->header_ops = 0; /* No header */
667 dev->type = ARPHRD_RAWIP;
668 dev->hard_header_len = 0;
669 dev->mtu = prev_mtu;
670 dev->addr_len = 0;
671 dev->flags &= ~(IFF_BROADCAST|
672 IFF_MULTICAST);
673
674 dev->netdev_ops = &rmnet_ops_ip;
675 spin_lock_irqsave(&p->lock, flags);
676 p->operation_mode &= ~RMNET_MODE_LLP_ETH;
677 p->operation_mode |= RMNET_MODE_LLP_IP;
678 spin_unlock_irqrestore(&p->lock, flags);
679 DBG0("[%s] rmnet_ioctl(): set IP protocol mode\n",
680 dev->name);
681 }
682 break;
683
684 case RMNET_IOCTL_GET_LLP: /* Get link protocol state */
685 ifr->ifr_ifru.ifru_data =
686 (void *)(p->operation_mode &
687 (RMNET_MODE_LLP_ETH|RMNET_MODE_LLP_IP));
688 break;
689
690 case RMNET_IOCTL_SET_QOS_ENABLE: /* Set QoS header enabled */
691 spin_lock_irqsave(&p->lock, flags);
692 p->operation_mode |= RMNET_MODE_QOS;
693 spin_unlock_irqrestore(&p->lock, flags);
694 DBG0("[%s] rmnet_ioctl(): set QMI QOS header enable\n",
695 dev->name);
696 break;
697
698 case RMNET_IOCTL_SET_QOS_DISABLE: /* Set QoS header disabled */
699 spin_lock_irqsave(&p->lock, flags);
700 p->operation_mode &= ~RMNET_MODE_QOS;
701 spin_unlock_irqrestore(&p->lock, flags);
702 DBG0("[%s] rmnet_ioctl(): set QMI QOS header disable\n",
703 dev->name);
704 break;
705
706 case RMNET_IOCTL_GET_QOS: /* Get QoS header state */
707 ifr->ifr_ifru.ifru_data =
708 (void *)(p->operation_mode & RMNET_MODE_QOS);
709 break;
710
711 case RMNET_IOCTL_GET_OPMODE: /* Get operation mode */
712 ifr->ifr_ifru.ifru_data = (void *)p->operation_mode;
713 break;
714
715 case RMNET_IOCTL_OPEN: /* Open transport port */
716 rc = __rmnet_open(dev);
717 DBG0("[%s] rmnet_ioctl(): open transport port\n",
718 dev->name);
719 break;
720
721 case RMNET_IOCTL_CLOSE: /* Close transport port */
722 rc = __rmnet_close(dev);
723 DBG0("[%s] rmnet_ioctl(): close transport port\n",
724 dev->name);
725 break;
726
727 default:
728 pr_err("[%s] error: rmnet_ioct called for unsupported cmd[%d]",
729 dev->name, cmd);
730 return -EINVAL;
731 }
732
733 DBG2("[%s] %s: cmd=0x%x opmode old=0x%08x new=0x%08x\n",
734 dev->name, __func__, cmd, old_opmode, p->operation_mode);
735 return rc;
736}
737
738
/* alloc_netdev() setup callback: configure a new rmnet device.
 * Devices start in Ethernet mode with a random MAC address. */
static void __init rmnet_setup(struct net_device *dev)
{
	/* Using Ethernet mode by default */
	dev->netdev_ops = &rmnet_ops_ether;
	ether_setup(dev);

	/* set this after calling ether_setup */
	dev->mtu = RMNET_DATA_LEN;
	/* Room for skb_push() of the QMI QoS header in QoS mode. */
	dev->needed_headroom = HEADROOM_FOR_QOS;

	random_ether_addr(dev->dev_addr);

	dev->watchdog_timeo = 1000; /* 10 seconds? */
}
753
754static int msm_rmnet_smd_probe(struct platform_device *pdev)
755{
756 int i;
757
758 for (i = 0; i < RMNET_DEVICE_COUNT; i++)
759 if (!strcmp(pdev->name, ch_name[i])) {
760 complete_all(port_complete[i]);
761 break;
762 }
763
764 return 0;
765}
766
/* Module init: allocate and register one net device per SMD data channel,
 * plus a per-channel platform driver used to detect port availability.
 * NOTE(review): on a mid-loop failure, devices registered in earlier
 * iterations are not unregistered — pre-existing behavior, left as-is. */
static int __init rmnet_init(void)
{
	int ret;
	struct device *d;
	struct net_device *dev;
	struct rmnet_private *p;
	unsigned n;

	pr_info("%s: SMD devices[%d]\n", __func__, RMNET_DEVICE_COUNT);

#ifdef CONFIG_MSM_RMNET_DEBUG
	timeout_us = 0;
#ifdef CONFIG_HAS_EARLYSUSPEND
	timeout_suspend_us = 0;
#endif
#endif

	for (n = 0; n < RMNET_DEVICE_COUNT; n++) {
		dev = alloc_netdev(sizeof(struct rmnet_private),
				   "rmnet%d", rmnet_setup);

		if (!dev)
			return -ENOMEM;

		d = &(dev->dev);
		p = netdev_priv(dev);
		p->chname = ch_name[n];
		/* Initial config uses Ethernet */
		p->operation_mode = RMNET_MODE_LLP_ETH;
		p->skb = NULL;
		spin_lock_init(&p->lock);
		tasklet_init(&p->tsklt, _rmnet_resume_flow,
			     (unsigned long)dev);
		wake_lock_init(&p->wake_lock, WAKE_LOCK_SUSPEND, ch_name[n]);
#ifdef CONFIG_MSM_RMNET_DEBUG
		p->timeout_us = timeout_us;
		p->wakeups_xmit = p->wakeups_rcv = 0;
#endif

		/* Let msm_rmnet_smd_probe() signal this channel's arrival. */
		init_completion(&p->complete);
		port_complete[n] = &p->complete;
		mutex_init(&p->pil_lock);
		p->pdrv.probe = msm_rmnet_smd_probe;
		p->pdrv.driver.name = ch_name[n];
		p->pdrv.driver.owner = THIS_MODULE;
		ret = platform_driver_register(&p->pdrv);
		if (ret) {
			free_netdev(dev);
			return ret;
		}

		ret = register_netdev(dev);
		if (ret) {
			platform_driver_unregister(&p->pdrv);
			free_netdev(dev);
			return ret;
		}


#ifdef CONFIG_MSM_RMNET_DEBUG
		/* sysfs attribute failures are non-fatal: skip the rest
		 * of the attributes for this device and carry on. */
		if (device_create_file(d, &dev_attr_timeout))
			continue;
		if (device_create_file(d, &dev_attr_wakeups_xmit))
			continue;
		if (device_create_file(d, &dev_attr_wakeups_rcv))
			continue;
#ifdef CONFIG_HAS_EARLYSUSPEND
		if (device_create_file(d, &dev_attr_timeout_suspend))
			continue;

		/* Only care about rmnet0 for suspend/resume tiemout hooks. */
		if (n == 0)
			rmnet0 = d;
#endif
#endif
	}
	return 0;
}

module_init(rmnet_init);