/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/ratelimit.h>
#include <linux/usb.h>
#include <mach/usb_bridge.h>

#define MAX_RX_URBS		50
#define RMNET_RX_BUFSIZE	2048

#define STOP_SUBMIT_URB_LIMIT	400
#define FLOW_CTRL_EN_THRESHOLD	500
#define FLOW_CTRL_DISABLE	300
#define FLOW_CTRL_SUPPORT	1
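
/*
 * Threshold semantics (see data_bridge_process_rx() and
 * data_bridge_write()): while the client is throttled and more than
 * stop_submit_urb_limit received packets sit on rx_done, idle RX URBs
 * are not resubmitted. On TX, crossing fctrl_en_thld pending URBs
 * throttles the client, and dropping back to fctrl_dis_thld or below
 * unthrottles it, giving simple hysteresis.
 */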

static const char *data_bridge_names[] = {
	"dun_data_hsic0",
	"rmnet_data_hsic0"
};

static struct workqueue_struct *bridge_wq;

static unsigned int fctrl_support = FLOW_CTRL_SUPPORT;
module_param(fctrl_support, uint, S_IRUGO | S_IWUSR);

static unsigned int fctrl_en_thld = FLOW_CTRL_EN_THRESHOLD;
module_param(fctrl_en_thld, uint, S_IRUGO | S_IWUSR);

static unsigned int fctrl_dis_thld = FLOW_CTRL_DISABLE;
module_param(fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);

unsigned int max_rx_urbs = MAX_RX_URBS;
module_param(max_rx_urbs, uint, S_IRUGO | S_IWUSR);

unsigned int stop_submit_urb_limit = STOP_SUBMIT_URB_LIMIT;
module_param(stop_submit_urb_limit, uint, S_IRUGO | S_IWUSR);

#define TX_HALT		BIT(0)
#define RX_HALT		BIT(1)
#define SUSPENDED	BIT(2)

struct data_bridge {
	struct usb_interface	*intf;
	struct usb_device	*udev;
	unsigned int		bulk_in;
	unsigned int		bulk_out;

	/* keep track of in-flight URBs */
	struct usb_anchor	tx_active;
	struct usb_anchor	rx_active;

	/* keep track of outgoing URBs during suspend */
	struct usb_anchor	delayed;

	struct list_head	rx_idle;
	struct sk_buff_head	rx_done;

	struct workqueue_struct	*wq;
	struct work_struct	process_rx_w;

	struct bridge		*brdg;

	/* work queue function for handling halt conditions */
	struct work_struct	kevent;

	unsigned long		flags;

	struct platform_device	*pdev;

	/* counters */
	atomic_t		pending_txurbs;
	unsigned int		txurb_drp_cnt;
	unsigned long		to_host;
	unsigned long		to_modem;
	unsigned int		tx_throttled_cnt;
	unsigned int		tx_unthrottled_cnt;
	unsigned int		rx_throttled_cnt;
	unsigned int		rx_unthrottled_cnt;
};

static struct data_bridge	*__dev[MAX_BRIDGE_DEVICES];

/* counter used for indexing data bridge devices */
static int	ch_id;

static int submit_rx_urb(struct data_bridge *dev, struct urb *urb,
		gfp_t flags);

static inline bool rx_halted(struct data_bridge *dev)
{
	return test_bit(RX_HALT, &dev->flags);
}

static inline bool rx_throttled(struct bridge *brdg)
{
	return test_bit(RX_THROTTLED, &brdg->flags);
}

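/*
 * Called by the bridge client once it can accept packets again: resume
 * delivery of queued rx_done skbs and RX URB resubmission on the
 * bridge workqueue.
 */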
int data_bridge_unthrottle_rx(unsigned int id)
{
	struct data_bridge	*dev;

	if (id >= MAX_BRIDGE_DEVICES)
		return -EINVAL;

	dev = __dev[id];
	if (!dev || !dev->brdg)
		return -ENODEV;

	dev->rx_unthrottled_cnt++;
	queue_work(dev->wq, &dev->process_rx_w);

	return 0;
}
EXPORT_SYMBOL(data_bridge_unthrottle_rx);

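/*
 * Workqueue handler: drain rx_done to the client until it throttles us
 * (-EBUSY) or disconnects, then resubmit idle RX URBs unless the client
 * is throttled and the backlog exceeds stop_submit_urb_limit.
 */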
static void data_bridge_process_rx(struct work_struct *work)
{
	int			retval;
	unsigned long		flags;
	struct urb		*rx_idle;
	struct sk_buff		*skb;
	struct data_bridge	*dev =
		container_of(work, struct data_bridge, process_rx_w);

	struct bridge		*brdg = dev->brdg;

	if (!brdg || !brdg->ops.send_pkt || rx_halted(dev))
		return;

	while (!rx_throttled(brdg) && (skb = skb_dequeue(&dev->rx_done))) {
		dev->to_host++;
		/* hand off sk_buff to client, they'll need to free it */
		retval = brdg->ops.send_pkt(brdg->ctx, skb, skb->len);
		if (retval == -ENOTCONN || retval == -EINVAL) {
			return;
		} else if (retval == -EBUSY) {
			dev->rx_throttled_cnt++;
			break;
		}
	}

	spin_lock_irqsave(&dev->rx_done.lock, flags);
	if (dev->rx_done.qlen > stop_submit_urb_limit && rx_throttled(brdg)) {
		spin_unlock_irqrestore(&dev->rx_done.lock, flags);
		return;
	}

	while (!list_empty(&dev->rx_idle)) {
		rx_idle = list_first_entry(&dev->rx_idle, struct urb, urb_list);
		list_del(&rx_idle->urb_list);
		spin_unlock_irqrestore(&dev->rx_done.lock, flags);
		retval = submit_rx_urb(dev, rx_idle, GFP_KERNEL);
		spin_lock_irqsave(&dev->rx_done.lock, flags);
		if (retval)
			break;
	}
	spin_unlock_irqrestore(&dev->rx_done.lock, flags);
}

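/*
 * RX URB completion: on success queue the filled skb on rx_done; on a
 * stalled endpoint schedule kevent to clear the halt. The URB itself
 * always returns to rx_idle for later resubmission.
 */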
static void data_bridge_read_cb(struct urb *urb)
{
	struct bridge		*brdg;
	struct sk_buff		*skb = urb->context;
	struct data_bridge	*dev = *(struct data_bridge **)skb->cb;
	bool			queue = false;

	brdg = dev->brdg;

	skb_put(skb, urb->actual_length);

	switch (urb->status) {
	case 0: /* success */
		queue = true;
		spin_lock(&dev->rx_done.lock);
		__skb_queue_tail(&dev->rx_done, skb);
		spin_unlock(&dev->rx_done.lock);
		break;

	/* do not resubmit */
	case -EPIPE:
		set_bit(RX_HALT, &dev->flags);
		dev_err(&dev->udev->dev, "%s: epin halted\n", __func__);
		schedule_work(&dev->kevent);
		/* FALLTHROUGH */
	case -ESHUTDOWN:
	case -ENOENT: /* suspended */
	case -ECONNRESET: /* unplug */
	case -EPROTO:
		dev_kfree_skb_any(skb);
		break;

	/* resubmit */
	case -EOVERFLOW: /* babble error */
	default:
		queue = true;
		dev_kfree_skb_any(skb);
		pr_debug_ratelimited("%s: non zero urb status = %d\n",
			__func__, urb->status);
		break;
	}

	spin_lock(&dev->rx_done.lock);
	list_add_tail(&urb->urb_list, &dev->rx_idle);
	spin_unlock(&dev->rx_done.lock);

	if (queue)
		queue_work(dev->wq, &dev->process_rx_w);
}

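/*
 * Allocate a fresh receive skb, stash the owning data_bridge pointer in
 * skb->cb so the completion handler can recover it, and submit the URB
 * anchored on rx_active. Consumes the URB and skb on failure.
 */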
static int submit_rx_urb(struct data_bridge *dev, struct urb *rx_urb,
	gfp_t flags)
{
	struct sk_buff	*skb;
	int		retval = -EINVAL;

	skb = alloc_skb(RMNET_RX_BUFSIZE, flags);
	if (!skb) {
		usb_free_urb(rx_urb);
		return -ENOMEM;
	}

	*((struct data_bridge **)skb->cb) = dev;

	usb_fill_bulk_urb(rx_urb, dev->udev, dev->bulk_in,
			skb->data, RMNET_RX_BUFSIZE,
			data_bridge_read_cb, skb);

	if (test_bit(SUSPENDED, &dev->flags))
		goto suspended;

	usb_anchor_urb(rx_urb, &dev->rx_active);
	retval = usb_submit_urb(rx_urb, flags);
	if (retval)
		goto fail;

	return 0;
fail:
	usb_unanchor_urb(rx_urb);
suspended:
	dev_kfree_skb_any(skb);
	usb_free_urb(rx_urb);
	return retval;
}

static int data_bridge_prepare_rx(struct data_bridge *dev)
{
	int		i;
	struct urb	*rx_urb;

	for (i = 0; i < max_rx_urbs; i++) {
		rx_urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!rx_urb)
			return -ENOMEM;

		list_add_tail(&rx_urb->urb_list, &dev->rx_idle);
	}
	return 0;
}

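/*
 * Bind a client to an already-probed bridge channel, reset the
 * per-channel statistics, and start RX delivery immediately.
 */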
int data_bridge_open(struct bridge *brdg)
{
	struct data_bridge	*dev;

	if (!brdg) {
		err("bridge is null\n");
		return -EINVAL;
	}

	if (brdg->ch_id >= MAX_BRIDGE_DEVICES)
		return -EINVAL;

	dev = __dev[brdg->ch_id];
	if (!dev) {
		err("dev is null\n");
		return -ENODEV;
	}

	dev_dbg(&dev->udev->dev, "%s: dev:%p\n", __func__, dev);

	dev->brdg = brdg;
	atomic_set(&dev->pending_txurbs, 0);
	dev->to_host = 0;
	dev->to_modem = 0;
	dev->txurb_drp_cnt = 0;
	dev->tx_throttled_cnt = 0;
	dev->tx_unthrottled_cnt = 0;
	dev->rx_throttled_cnt = 0;
	dev->rx_unthrottled_cnt = 0;

	queue_work(dev->wq, &dev->process_rx_w);

	return 0;
}
EXPORT_SYMBOL(data_bridge_open);

void data_bridge_close(unsigned int id)
{
	struct data_bridge	*dev;
	struct sk_buff		*skb;
	unsigned long		flags;

	if (id >= MAX_BRIDGE_DEVICES)
		return;

	dev = __dev[id];
	if (!dev || !dev->brdg)
		return;

	dev_dbg(&dev->udev->dev, "%s:\n", __func__);

	usb_unlink_anchored_urbs(&dev->tx_active);
	usb_unlink_anchored_urbs(&dev->rx_active);
	usb_unlink_anchored_urbs(&dev->delayed);

	spin_lock_irqsave(&dev->rx_done.lock, flags);
	while ((skb = __skb_dequeue(&dev->rx_done)))
		dev_kfree_skb_any(skb);
	spin_unlock_irqrestore(&dev->rx_done.lock, flags);

	dev->brdg = NULL;
}
EXPORT_SYMBOL(data_bridge_close);

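/*
 * Halt recovery, scheduled on the shared system workqueue: unlink the
 * stalled direction's URBs, wake the interface, and clear the endpoint
 * halt before traffic is allowed to resume.
 */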
static void defer_kevent(struct work_struct *work)
{
	int			status;
	struct data_bridge	*dev =
		container_of(work, struct data_bridge, kevent);

	if (!dev)
		return;

	if (test_bit(TX_HALT, &dev->flags)) {
		usb_unlink_anchored_urbs(&dev->tx_active);

		status = usb_autopm_get_interface(dev->intf);
		if (status < 0) {
			dev_err(&dev->udev->dev,
				"can't acquire interface, status %d\n", status);
			return;
		}

		status = usb_clear_halt(dev->udev, dev->bulk_out);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 && status != -EPIPE && status != -ESHUTDOWN)
			dev_err(&dev->udev->dev,
				"can't clear tx halt, status %d\n", status);
		else
			clear_bit(TX_HALT, &dev->flags);
	}

	if (test_bit(RX_HALT, &dev->flags)) {
		usb_unlink_anchored_urbs(&dev->rx_active);

		status = usb_autopm_get_interface(dev->intf);
		if (status < 0) {
			dev_err(&dev->udev->dev,
				"can't acquire interface, status %d\n", status);
			return;
		}

		status = usb_clear_halt(dev->udev, dev->bulk_in);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 && status != -EPIPE && status != -ESHUTDOWN) {
			dev_err(&dev->udev->dev,
				"can't clear rx halt, status %d\n", status);
		} else {
			clear_bit(RX_HALT, &dev->flags);
			if (dev->brdg)
				queue_work(dev->wq, &dev->process_rx_w);
		}
	}
}

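/*
 * TX URB completion: free the URB and skb, and once the pending count
 * drops to fctrl_dis_thld or below, clear TX_THROTTLED and notify the
 * client via unthrottle_tx().
 */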
static void data_bridge_write_cb(struct urb *urb)
{
	struct sk_buff		*skb = urb->context;
	struct data_bridge	*dev = *(struct data_bridge **)skb->cb;
	struct bridge		*brdg = dev->brdg;
	int			pending;

	pr_debug("%s: dev:%p\n", __func__, dev);

	switch (urb->status) {
	case 0: /* success */
		break;
	case -EPIPE:
		set_bit(TX_HALT, &dev->flags);
		dev_err(&dev->udev->dev, "%s: epout halted\n", __func__);
		schedule_work(&dev->kevent);
		/* FALLTHROUGH */
	case -ESHUTDOWN:
	case -ENOENT: /* suspended */
	case -ECONNRESET: /* unplug */
	case -EOVERFLOW: /* babble error */
		/* FALLTHROUGH */
	default:
		pr_debug_ratelimited("%s: non zero urb status = %d\n",
			__func__, urb->status);
	}

	usb_free_urb(urb);
	dev_kfree_skb_any(skb);

	pending = atomic_dec_return(&dev->pending_txurbs);

	/* flow ctrl */
	if (brdg && fctrl_support && pending <= fctrl_dis_thld &&
		test_and_clear_bit(TX_THROTTLED, &brdg->flags)) {
		pr_debug_ratelimited("%s: disable flow ctrl: pend urbs:%u\n",
			__func__, pending);
		dev->tx_unthrottled_cnt++;
		if (brdg->ops.unthrottle_tx)
			brdg->ops.unthrottle_tx(brdg->ctx);
	}

	usb_autopm_put_interface_async(dev->intf);
}

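/*
 * Queue one skb towards the modem. Returns the byte count on success or
 * -EBUSY when the pending-URB count exceeds fctrl_en_thld; in the
 * latter case the packet was still queued, but the caller should stop
 * writing until its unthrottle_tx callback runs.
 */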
int data_bridge_write(unsigned int id, struct sk_buff *skb)
{
	int			result;
	int			size = skb->len;
	int			pending;
	struct urb		*txurb;
	struct data_bridge	*dev = __dev[id];
	struct bridge		*brdg;

	if (!dev || !dev->brdg || !usb_get_intfdata(dev->intf))
		return -ENODEV;

	brdg = dev->brdg;

	dev_dbg(&dev->udev->dev, "%s: write (%d bytes)\n", __func__, skb->len);

	result = usb_autopm_get_interface(dev->intf);
	if (result < 0) {
		dev_err(&dev->udev->dev, "%s: resume failure\n", __func__);
		goto error;
	}

	txurb = usb_alloc_urb(0, GFP_KERNEL);
	if (!txurb) {
		dev_err(&dev->udev->dev, "%s: error allocating tx urb\n",
			__func__);
		result = -ENOMEM;
		goto error;
	}

	/* store dev pointer in skb */
	*((struct data_bridge **)skb->cb) = dev;

	usb_fill_bulk_urb(txurb, dev->udev, dev->bulk_out,
			skb->data, skb->len, data_bridge_write_cb, skb);

	if (test_bit(SUSPENDED, &dev->flags)) {
		usb_anchor_urb(txurb, &dev->delayed);
		/* the anchor holds its own reference; only ours is dropped */
		goto free_urb;
	}

	pending = atomic_inc_return(&dev->pending_txurbs);
	usb_anchor_urb(txurb, &dev->tx_active);

	result = usb_submit_urb(txurb, GFP_KERNEL);
	if (result < 0) {
		usb_unanchor_urb(txurb);
		atomic_dec(&dev->pending_txurbs);
		dev_err(&dev->udev->dev, "%s: submit URB error %d\n",
			__func__, result);
		goto free_urb;
	}

	dev->to_modem++;
	dev_dbg(&dev->udev->dev, "%s: pending_txurbs: %u\n", __func__, pending);

	/* flow control: the urb was submitted, but signal the client to
	 * throttle further writes by returning -EBUSY
	 */
	if (fctrl_support && pending > fctrl_en_thld) {
		set_bit(TX_THROTTLED, &brdg->flags);
		dev->tx_throttled_cnt++;
		pr_debug_ratelimited("%s: enable flow ctrl pend txurbs:%u\n",
			__func__, pending);
		return -EBUSY;
	}

	return size;

free_urb:
	usb_free_urb(txurb);
error:
	dev->txurb_drp_cnt++;
	usb_autopm_put_interface(dev->intf);

	return result;
}
EXPORT_SYMBOL(data_bridge_write);
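
/*
 * Example client usage (illustrative sketch only; the bridge fields
 * shown are the ones this file relies on, and the handlers are
 * hypothetical):
 *
 *	static struct bridge my_brdg = {
 *		.ch_id = 0,
 *		.ctx = &my_ctx,
 *		.ops = {
 *			.send_pkt = my_rx_handler,
 *			.unthrottle_tx = my_tx_resume,
 *		},
 *	};
 *
 *	data_bridge_open(&my_brdg);
 *	ret = data_bridge_write(my_brdg.ch_id, skb);
 *	if (ret == -EBUSY)
 *		hold off until my_tx_resume() is invoked;
 *	data_bridge_close(my_brdg.ch_id);
 */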
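/*
 * PM support: autosuspend is refused while TX URBs are pending; writes
 * arriving while SUSPENDED are parked on the delayed anchor and
 * submitted from data_bridge_resume().
 */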
static int data_bridge_resume(struct data_bridge *dev)
{
	struct urb	*urb;
	int		retval;

	while ((urb = usb_get_from_anchor(&dev->delayed))) {
		usb_anchor_urb(urb, &dev->tx_active);
		atomic_inc(&dev->pending_txurbs);
		retval = usb_submit_urb(urb, GFP_ATOMIC);
		if (retval < 0) {
			atomic_dec(&dev->pending_txurbs);
			usb_unanchor_urb(urb);

			/* TODO: need to free urb data */
			usb_scuttle_anchored_urbs(&dev->delayed);
			break;
		}
		dev->to_modem++;
		dev->txurb_drp_cnt--;
	}

	clear_bit(SUSPENDED, &dev->flags);

	if (dev->brdg)
		queue_work(dev->wq, &dev->process_rx_w);

	return 0;
}

static int bridge_resume(struct usb_interface *iface)
{
	int	retval = 0;
	int	oldstate;
	struct data_bridge	*dev = usb_get_intfdata(iface);
	struct bridge		*brdg = dev->brdg;

	oldstate = iface->dev.power.power_state.event;
	iface->dev.power.power_state.event = PM_EVENT_ON;

	retval = data_bridge_resume(dev);
	if (!retval) {
		if ((oldstate & PM_EVENT_SUSPEND) && brdg)
			retval = ctrl_bridge_resume(brdg->ch_id);
	}
	return retval;
}

static int data_bridge_suspend(struct data_bridge *dev, pm_message_t message)
{
	if (atomic_read(&dev->pending_txurbs) &&
		(message.event & PM_EVENT_AUTO))
		return -EBUSY;

	set_bit(SUSPENDED, &dev->flags);

	usb_kill_anchored_urbs(&dev->tx_active);
	usb_kill_anchored_urbs(&dev->rx_active);

	return 0;
}

static int bridge_suspend(struct usb_interface *intf, pm_message_t message)
{
	int	retval;
	struct data_bridge	*dev = usb_get_intfdata(intf);
	struct bridge		*brdg = dev->brdg;

	retval = data_bridge_suspend(dev, message);
	if (!retval) {
		if (message.event & PM_EVENT_SUSPEND) {
			if (brdg)
				retval = ctrl_bridge_suspend(brdg->ch_id);
			intf->dev.power.power_state.event = message.event;
		}
	} else {
		dev_dbg(&dev->udev->dev, "%s: device is busy, cannot suspend\n",
			__func__);
	}
	return retval;
}

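/*
 * Per-channel setup: allocate the data_bridge, resolve the bulk pipes,
 * pre-allocate the RX URB pool, and register the platform device that
 * clients bind against by name.
 */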
static int data_bridge_probe(struct usb_interface *iface,
		struct usb_host_endpoint *bulk_in,
		struct usb_host_endpoint *bulk_out, int id)
{
	struct data_bridge	*dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		err("%s: unable to allocate dev\n", __func__);
		return -ENOMEM;
	}

	dev->pdev = platform_device_alloc(data_bridge_names[id], id);
	if (!dev->pdev) {
		err("%s: unable to allocate platform device\n", __func__);
		kfree(dev);
		return -ENOMEM;
	}

	init_usb_anchor(&dev->tx_active);
	init_usb_anchor(&dev->rx_active);
	init_usb_anchor(&dev->delayed);

	INIT_LIST_HEAD(&dev->rx_idle);
	skb_queue_head_init(&dev->rx_done);

	dev->wq = bridge_wq;

	dev->udev = interface_to_usbdev(iface);
	dev->intf = iface;

	dev->bulk_in = usb_rcvbulkpipe(dev->udev,
		bulk_in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);

	dev->bulk_out = usb_sndbulkpipe(dev->udev,
		bulk_out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);

	usb_set_intfdata(iface, dev);

	INIT_WORK(&dev->kevent, defer_kevent);
	INIT_WORK(&dev->process_rx_w, data_bridge_process_rx);

	__dev[id] = dev;

	/* allocate list of rx urbs */
	data_bridge_prepare_rx(dev);

	platform_device_add(dev->pdev);

	return 0;
}

#if defined(CONFIG_DEBUG_FS)
#define DEBUG_BUF_SIZE	1024
static ssize_t data_bridge_read_stats(struct file *file, char __user *ubuf,
		size_t count, loff_t *ppos)
{
	struct data_bridge	*dev;
	char			*buf;
	int			ret;
	int			i;
	int			temp = 0;

	buf = kzalloc(DEBUG_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < ch_id; i++) {
		dev = __dev[i];
		if (!dev)
			continue;

		temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
				"\nName#%s dev %p\n"
				"pending tx urbs: %u\n"
				"tx urb drp cnt: %u\n"
				"to host: %lu\n"
				"to mdm: %lu\n"
				"tx throttled cnt: %u\n"
				"tx unthrottled cnt: %u\n"
				"rx throttled cnt: %u\n"
				"rx unthrottled cnt: %u\n"
				"rx done skb qlen: %u\n"
				"suspended: %d\n"
				"TX_HALT: %d\n"
				"RX_HALT: %d\n",
				dev->pdev->name, dev,
				atomic_read(&dev->pending_txurbs),
				dev->txurb_drp_cnt,
				dev->to_host,
				dev->to_modem,
				dev->tx_throttled_cnt,
				dev->tx_unthrottled_cnt,
				dev->rx_throttled_cnt,
				dev->rx_unthrottled_cnt,
				dev->rx_done.qlen,
				test_bit(SUSPENDED, &dev->flags),
				test_bit(TX_HALT, &dev->flags),
				test_bit(RX_HALT, &dev->flags));
	}

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);

	kfree(buf);

	return ret;
}

static ssize_t data_bridge_reset_stats(struct file *file,
	const char __user *buf, size_t count, loff_t *ppos)
{
	struct data_bridge	*dev;
	int			i;

	for (i = 0; i < ch_id; i++) {
		dev = __dev[i];
		if (!dev)
			continue;

		dev->to_host = 0;
		dev->to_modem = 0;
		dev->txurb_drp_cnt = 0;
		dev->tx_throttled_cnt = 0;
		dev->tx_unthrottled_cnt = 0;
		dev->rx_throttled_cnt = 0;
		dev->rx_unthrottled_cnt = 0;
	}
	return count;
}

static const struct file_operations data_stats_ops = {
	.read = data_bridge_read_stats,
	.write = data_bridge_reset_stats,
};

static struct dentry	*data_dent;
static struct dentry	*data_dfile;

static void data_bridge_debugfs_init(void)
{
	data_dent = debugfs_create_dir("data_hsic_bridge", NULL);
	if (!data_dent || IS_ERR(data_dent))
		return;

	data_dfile = debugfs_create_file("status", 0644, data_dent, NULL,
			&data_stats_ops);
	if (!data_dfile || IS_ERR(data_dfile))
		debugfs_remove(data_dent);
}

static void data_bridge_debugfs_exit(void)
{
	debugfs_remove(data_dfile);
	debugfs_remove(data_dent);
}

#else
static void data_bridge_debugfs_init(void) { }
static void data_bridge_debugfs_exit(void) { }
#endif

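/*
 * USB probe: accept only the DUN and tethered RMNET interfaces, locate
 * the bulk in/out and interrupt endpoints, then bring up the data and
 * control bridges for the next free channel id.
 */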
static int __devinit
bridge_probe(struct usb_interface *iface, const struct usb_device_id *id)
{
	struct usb_host_endpoint	*endpoint = NULL;
	struct usb_host_endpoint	*bulk_in = NULL;
	struct usb_host_endpoint	*bulk_out = NULL;
	struct usb_host_endpoint	*int_in = NULL;
	struct usb_device		*udev;
	int				i;
	int				status = 0;
	int				numends;
	int				iface_num;

	iface_num = iface->cur_altsetting->desc.bInterfaceNumber;

	if (iface->num_altsetting != 1) {
		err("%s invalid num_altsetting %u\n",
			__func__, iface->num_altsetting);
		return -EINVAL;
	}

	udev = interface_to_usbdev(iface);
	usb_get_dev(udev);

	if (iface_num != DUN_IFACE_NUM &&
			iface_num != TETHERED_RMNET_IFACE_NUM) {
		/* drop the device reference taken above before bailing out */
		status = -ENODEV;
		goto out;
	}

	numends = iface->cur_altsetting->desc.bNumEndpoints;
	for (i = 0; i < numends; i++) {
		endpoint = iface->cur_altsetting->endpoint + i;
		if (!endpoint) {
			dev_err(&udev->dev, "%s: invalid endpoint %u\n",
				__func__, i);
			status = -EINVAL;
			goto out;
		}

		if (usb_endpoint_is_bulk_in(&endpoint->desc))
			bulk_in = endpoint;
		else if (usb_endpoint_is_bulk_out(&endpoint->desc))
			bulk_out = endpoint;
		else if (usb_endpoint_is_int_in(&endpoint->desc))
			int_in = endpoint;
	}

	if (!bulk_in || !bulk_out || !int_in) {
		dev_err(&udev->dev, "%s: invalid endpoints\n", __func__);
		status = -EINVAL;
		goto out;
	}

	status = data_bridge_probe(iface, bulk_in, bulk_out, ch_id);
	if (status < 0) {
		dev_err(&udev->dev, "data_bridge_probe failed %d\n", status);
		goto out;
	}

	status = ctrl_bridge_probe(iface, int_in, ch_id);
	if (status < 0) {
		dev_err(&udev->dev, "ctrl_bridge_probe failed %d\n", status);
		goto free_data_bridge;
	}
	ch_id++;

	return 0;

free_data_bridge:
	platform_device_del(__dev[ch_id]->pdev);
	usb_set_intfdata(iface, NULL);
	kfree(__dev[ch_id]);
	__dev[ch_id] = NULL;
out:
	usb_put_dev(udev);

	return status;
}

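/*
 * USB disconnect: tear down in reverse probe order, flush the deferred
 * work, and free the RX URB pool. Note ch_id is decremented here, which
 * assumes channels disconnect in LIFO order.
 */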
static void bridge_disconnect(struct usb_interface *intf)
{
	struct data_bridge	*dev = usb_get_intfdata(intf);
	struct list_head	*head;
	struct urb		*rx_urb;
	unsigned long		flags;
	int			iface_num;

	if (!dev) {
		err("%s: data device not found\n", __func__);
		return;
	}

	iface_num = intf->cur_altsetting->desc.bInterfaceNumber;
	if (iface_num != DUN_IFACE_NUM && iface_num != TETHERED_RMNET_IFACE_NUM)
		return;

	ch_id--;
	ctrl_bridge_disconnect(ch_id);
	platform_device_del(dev->pdev);
	usb_set_intfdata(intf, NULL);
	__dev[ch_id] = NULL;

	cancel_work_sync(&dev->process_rx_w);
	cancel_work_sync(&dev->kevent);

	/* free rx urbs */
	head = &dev->rx_idle;
	spin_lock_irqsave(&dev->rx_done.lock, flags);
	while (!list_empty(head)) {
		rx_urb = list_entry(head->next, struct urb, urb_list);
		list_del(&rx_urb->urb_list);
		usb_free_urb(rx_urb);
	}
	spin_unlock_irqrestore(&dev->rx_done.lock, flags);

	usb_put_dev(dev->udev);
	kfree(dev);
}

876static const struct usb_device_id bridge_ids[] = {
Jack Phamb1ad7152011-12-07 10:58:11 -0800877 { USB_DEVICE(0x5c6, 0x9001) },
878
879 { } /* Terminating entry */
Hemant Kumar14401d52011-11-03 16:40:32 -0700880};
881
882MODULE_DEVICE_TABLE(usb, bridge_ids);
883
884static struct usb_driver bridge_driver = {
885 .name = "mdm_bridge",
886 .probe = bridge_probe,
887 .disconnect = bridge_disconnect,
888 .id_table = bridge_ids,
889 .suspend = bridge_suspend,
890 .resume = bridge_resume,
891 .supports_autosuspend = 1,
892};
893
static int __init bridge_init(void)
{
	int	ret;

	ret = usb_register(&bridge_driver);
	if (ret) {
		err("%s: unable to register mdm_bridge driver\n", __func__);
		return ret;
	}

	bridge_wq = create_singlethread_workqueue("mdm_bridge");
	if (!bridge_wq) {
		usb_deregister(&bridge_driver);
		pr_err("%s: Unable to create workqueue:bridge\n", __func__);
		return -ENOMEM;
	}

	data_bridge_debugfs_init();

	return 0;
}

static void __exit bridge_exit(void)
{
	data_bridge_debugfs_exit();
	destroy_workqueue(bridge_wq);
	usb_deregister(&bridge_driver);
}

module_init(bridge_init);
module_exit(bridge_exit);

MODULE_DESCRIPTION("Qualcomm modem data bridge driver");
MODULE_LICENSE("GPL v2");