blob: 90c32cb8fdd9d5c8956ebef0e7db1edc5547d106 [file] [log] [blame]
Hemant Kumar67a4fd02012-01-05 15:44:36 -08001/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
Hemant Kumar14401d52011-11-03 16:40:32 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/kernel.h>
14#include <linux/errno.h>
15#include <linux/init.h>
16#include <linux/slab.h>
17#include <linux/module.h>
18#include <linux/debugfs.h>
19#include <linux/platform_device.h>
20#include <linux/uaccess.h>
21#include <linux/ratelimit.h>
22#include <mach/usb_bridge.h>
23
/* bulk-in urb pool size and per-urb receive buffer size */
#define MAX_RX_URBS			50
#define RMNET_RX_BUFSIZE		2048

/* stop refilling rx urbs once this many skbs sit undelivered in rx_done */
#define STOP_SUBMIT_URB_LIMIT		500
/* enable TX flow control above this many in-flight tx urbs ... */
#define FLOW_CTRL_EN_THRESHOLD		500
/* ... and disable it again once the count falls back to this */
#define FLOW_CTRL_DISABLE		300
#define FLOW_CTRL_SUPPORT		1

/* platform device names, indexed by bridge channel id */
static const char *data_bridge_names[] = {
	"dun_data_hsic0",
	"rmnet_data_hsic0"
};

/* single-threaded workqueue shared by all bridge instances */
static struct workqueue_struct	*bridge_wq;

/* runtime-tunable copies of the flow-control parameters above */
static unsigned int	fctrl_support = FLOW_CTRL_SUPPORT;
module_param(fctrl_support, uint, S_IRUGO | S_IWUSR);

static unsigned int	fctrl_en_thld = FLOW_CTRL_EN_THRESHOLD;
module_param(fctrl_en_thld, uint, S_IRUGO | S_IWUSR);

static unsigned int	fctrl_dis_thld = FLOW_CTRL_DISABLE;
module_param(fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);

unsigned int	max_rx_urbs = MAX_RX_URBS;
module_param(max_rx_urbs, uint, S_IRUGO | S_IWUSR);

unsigned int	stop_submit_urb_limit = STOP_SUBMIT_URB_LIMIT;
module_param(stop_submit_urb_limit, uint, S_IRUGO | S_IWUSR);

/* dev->flags bits */
#define TX_HALT   BIT(0)	/* bulk-out endpoint stalled */
#define RX_HALT   BIT(1)	/* bulk-in endpoint stalled */
#define SUSPENDED BIT(2)	/* interface is suspended */
57
/* Per-channel state for one USB data bridge instance. */
struct data_bridge {
	struct usb_interface		*intf;
	struct usb_device		*udev;
	unsigned int			bulk_in;	/* rcv bulk pipe handle */
	unsigned int			bulk_out;	/* snd bulk pipe handle */

	/* keep track of in-flight URBs */
	struct usb_anchor		tx_active;
	struct usb_anchor		rx_active;

	/* keep track of outgoing URBs during suspend */
	struct usb_anchor		delayed;

	/* unused rx urbs; protected by rx_done.lock throughout this file */
	struct list_head		rx_idle;
	/* received skbs awaiting delivery to the client */
	struct sk_buff_head		rx_done;

	struct workqueue_struct		*wq;
	struct work_struct		process_rx_w;

	/* client registration; NULL while the channel is closed */
	struct bridge			*brdg;

	/* work queue function for handling halt conditions */
	struct work_struct		kevent;

	/* TX_HALT / RX_HALT / SUSPENDED bits */
	unsigned long			flags;

	struct platform_device		*pdev;

	/* counters */
	atomic_t			pending_txurbs;
	unsigned int			txurb_drp_cnt;
	unsigned long			to_host;
	unsigned long			to_modem;
	unsigned int			tx_throttled_cnt;
	unsigned int			tx_unthrottled_cnt;
	unsigned int			rx_throttled_cnt;
	unsigned int			rx_unthrottled_cnt;
};

/* channel table, indexed by bridge id; populated in data_bridge_probe().
 * NOTE(review): double-underscore identifiers are reserved for the
 * implementation — consider renaming. */
static struct data_bridge	*__dev[MAX_BRIDGE_DEVICES];

/* counter used for indexing data bridge devices */
static int	ch_id;

static int submit_rx_urb(struct data_bridge *dev, struct urb *urb,
		gfp_t flags);
104
105static inline bool rx_halted(struct data_bridge *dev)
106{
107 return test_bit(RX_HALT, &dev->flags);
108}
109
110static inline bool rx_throttled(struct bridge *brdg)
111{
112 return test_bit(RX_THROTTLED, &brdg->flags);
113}
114
115int data_bridge_unthrottle_rx(unsigned int id)
116{
117 struct data_bridge *dev;
118
119 if (id >= MAX_BRIDGE_DEVICES)
120 return -EINVAL;
121
122 dev = __dev[id];
Jack Phama7c92672011-11-29 16:38:21 -0800123 if (!dev || !dev->brdg)
Hemant Kumar14401d52011-11-03 16:40:32 -0700124 return -ENODEV;
125
126 dev->rx_unthrottled_cnt++;
127 queue_work(dev->wq, &dev->process_rx_w);
128
129 return 0;
130}
131EXPORT_SYMBOL(data_bridge_unthrottle_rx);
132
/*
 * Work item: push completed RX skbs up to the client, then refill the
 * bulk-in pipe from the rx_idle urb pool.
 */
static void data_bridge_process_rx(struct work_struct *work)
{
	int			retval;
	unsigned long		flags;
	struct urb		*rx_idle;
	struct sk_buff		*skb;
	struct data_bridge	*dev =
		container_of(work, struct data_bridge, process_rx_w);

	struct bridge		*brdg = dev->brdg;

	/* nothing to do if the channel is closed, the client has no
	 * receive callback, or the IN endpoint is halted */
	if (!brdg || !brdg->ops.send_pkt || rx_halted(dev))
		return;

	while (!rx_throttled(brdg) && (skb = skb_dequeue(&dev->rx_done))) {
		dev->to_host++;
		/* hand off sk_buff to client,they'll need to free it */
		retval = brdg->ops.send_pkt(brdg->ctx, skb, skb->len);
		if (retval == -ENOTCONN || retval == -EINVAL) {
			/* client gone or rejecting; stop delivering */
			return;
		} else if (retval == -EBUSY) {
			/* client throttled; wait for unthrottle_rx() */
			dev->rx_throttled_cnt++;
			break;
		}
	}

	/* rx_done.lock also guards the rx_idle list (as everywhere in this
	 * file); it is dropped around submit_rx_urb() because that path may
	 * sleep (GFP_KERNEL) */
	spin_lock_irqsave(&dev->rx_done.lock, flags);
	while (!list_empty(&dev->rx_idle)) {
		/* stop refilling while the undelivered backlog is too deep */
		if (dev->rx_done.qlen > stop_submit_urb_limit)
			break;

		rx_idle = list_first_entry(&dev->rx_idle, struct urb, urb_list);
		list_del(&rx_idle->urb_list);
		spin_unlock_irqrestore(&dev->rx_done.lock, flags);
		retval = submit_rx_urb(dev, rx_idle, GFP_KERNEL);
		spin_lock_irqsave(&dev->rx_done.lock, flags);
		if (retval)
			break;
	}
	spin_unlock_irqrestore(&dev->rx_done.lock, flags);
}
174
175static void data_bridge_read_cb(struct urb *urb)
176{
177 struct bridge *brdg;
178 struct sk_buff *skb = urb->context;
179 struct data_bridge *dev = *(struct data_bridge **)skb->cb;
180 bool queue = 0;
181
182 brdg = dev->brdg;
183
184 skb_put(skb, urb->actual_length);
185
186 switch (urb->status) {
187 case 0: /* success */
188 queue = 1;
189 spin_lock(&dev->rx_done.lock);
190 __skb_queue_tail(&dev->rx_done, skb);
191 spin_unlock(&dev->rx_done.lock);
192 break;
193
194 /*do not resubmit*/
195 case -EPIPE:
196 set_bit(RX_HALT, &dev->flags);
197 dev_err(&dev->udev->dev, "%s: epout halted\n", __func__);
198 schedule_work(&dev->kevent);
199 /* FALLTHROUGH */
200 case -ESHUTDOWN:
201 case -ENOENT: /* suspended */
202 case -ECONNRESET: /* unplug */
203 case -EPROTO:
204 dev_kfree_skb_any(skb);
205 break;
206
207 /*resubmit */
208 case -EOVERFLOW: /*babble error*/
209 default:
210 queue = 1;
211 dev_kfree_skb_any(skb);
212 pr_debug_ratelimited("%s: non zero urb status = %d\n",
213 __func__, urb->status);
214 break;
215 }
216
217 spin_lock(&dev->rx_done.lock);
218 list_add_tail(&urb->urb_list, &dev->rx_idle);
219 spin_unlock(&dev->rx_done.lock);
220
221 if (queue)
222 queue_work(dev->wq, &dev->process_rx_w);
223}
224
/*
 * Allocate a receive skb and submit @rx_urb on the bulk-in pipe.
 *
 * Returns 0 on success or a negative errno.  On any failure (skb alloc,
 * device suspended, submit error) both the skb and the urb are freed,
 * so the rx urb pool shrinks permanently on errors.
 */
static int submit_rx_urb(struct data_bridge *dev, struct urb *rx_urb,
	gfp_t flags)
{
	struct sk_buff	*skb;
	int		retval = -EINVAL;

	skb = alloc_skb(RMNET_RX_BUFSIZE, flags);
	if (!skb) {
		usb_free_urb(rx_urb);
		return -ENOMEM;
	}

	/* stash the dev pointer in skb->cb for data_bridge_read_cb() */
	*((struct data_bridge **)skb->cb) = dev;

	usb_fill_bulk_urb(rx_urb, dev->udev, dev->bulk_in,
			  skb->data, RMNET_RX_BUFSIZE,
			  data_bridge_read_cb, skb);

	/* don't touch the bus while suspended */
	if (test_bit(SUSPENDED, &dev->flags))
		goto suspended;

	usb_anchor_urb(rx_urb, &dev->rx_active);
	retval = usb_submit_urb(rx_urb, flags);
	if (retval)
		goto fail;

	return 0;
fail:
	usb_unanchor_urb(rx_urb);
suspended:
	dev_kfree_skb_any(skb);
	usb_free_urb(rx_urb);
	return retval;
}
259
260static int data_bridge_prepare_rx(struct data_bridge *dev)
261{
262 int i;
263 struct urb *rx_urb;
264
265 for (i = 0; i < max_rx_urbs; i++) {
266 rx_urb = usb_alloc_urb(0, GFP_KERNEL);
267 if (!rx_urb)
268 return -ENOMEM;
269
270 list_add_tail(&rx_urb->urb_list, &dev->rx_idle);
271 }
272 return 0;
273}
274
275int data_bridge_open(struct bridge *brdg)
276{
277 struct data_bridge *dev;
278
279 if (!brdg) {
280 err("bridge is null\n");
281 return -EINVAL;
282 }
283
284 if (brdg->ch_id >= MAX_BRIDGE_DEVICES)
285 return -EINVAL;
286
287 dev = __dev[brdg->ch_id];
288 if (!dev) {
289 err("dev is null\n");
290 return -ENODEV;
291 }
292
293 dev_dbg(&dev->udev->dev, "%s: dev:%p\n", __func__, dev);
294
295 dev->brdg = brdg;
296 atomic_set(&dev->pending_txurbs, 0);
297 dev->to_host = 0;
298 dev->to_modem = 0;
299 dev->txurb_drp_cnt = 0;
300 dev->tx_throttled_cnt = 0;
301 dev->tx_unthrottled_cnt = 0;
302 dev->rx_throttled_cnt = 0;
303 dev->rx_unthrottled_cnt = 0;
304
305 queue_work(dev->wq, &dev->process_rx_w);
306
307 return 0;
308}
309EXPORT_SYMBOL(data_bridge_open);
310
311void data_bridge_close(unsigned int id)
312{
313 struct data_bridge *dev;
314 struct sk_buff *skb;
315 unsigned long flags;
316
317 if (id >= MAX_BRIDGE_DEVICES)
318 return;
319
320 dev = __dev[id];
Jack Phama7c92672011-11-29 16:38:21 -0800321 if (!dev || !dev->brdg)
Hemant Kumar14401d52011-11-03 16:40:32 -0700322 return;
323
324 dev_dbg(&dev->udev->dev, "%s:\n", __func__);
325
326 usb_unlink_anchored_urbs(&dev->tx_active);
327 usb_unlink_anchored_urbs(&dev->rx_active);
328 usb_unlink_anchored_urbs(&dev->delayed);
329
330 spin_lock_irqsave(&dev->rx_done.lock, flags);
331 while ((skb = __skb_dequeue(&dev->rx_done)))
332 dev_kfree_skb_any(skb);
333 spin_unlock_irqrestore(&dev->rx_done.lock, flags);
334
335 dev->brdg = NULL;
336}
337EXPORT_SYMBOL(data_bridge_close);
338
339static void defer_kevent(struct work_struct *work)
340{
341 int status;
342 struct data_bridge *dev =
343 container_of(work, struct data_bridge, kevent);
344
345 if (!dev)
346 return;
347
348 if (test_bit(TX_HALT, &dev->flags)) {
349 usb_unlink_anchored_urbs(&dev->tx_active);
350
351 status = usb_autopm_get_interface(dev->intf);
352 if (status < 0) {
353 dev_err(&dev->udev->dev,
354 "can't acquire interface, status %d\n", status);
355 return;
356 }
357
358 status = usb_clear_halt(dev->udev, dev->bulk_out);
359 usb_autopm_put_interface(dev->intf);
360 if (status < 0 && status != -EPIPE && status != -ESHUTDOWN)
361 dev_err(&dev->udev->dev,
362 "can't clear tx halt, status %d\n", status);
363 else
364 clear_bit(TX_HALT, &dev->flags);
365 }
366
367 if (test_bit(RX_HALT, &dev->flags)) {
368 usb_unlink_anchored_urbs(&dev->rx_active);
369
370 status = usb_autopm_get_interface(dev->intf);
371 if (status < 0) {
372 dev_err(&dev->udev->dev,
373 "can't acquire interface, status %d\n", status);
374 return;
375 }
376
377 status = usb_clear_halt(dev->udev, dev->bulk_in);
378 usb_autopm_put_interface(dev->intf);
379 if (status < 0 && status != -EPIPE && status != -ESHUTDOWN)
380 dev_err(&dev->udev->dev,
381 "can't clear rx halt, status %d\n", status);
382 else {
383 clear_bit(RX_HALT, &dev->flags);
384 if (dev->brdg)
385 queue_work(dev->wq, &dev->process_rx_w);
386 }
387 }
388}
389
/*
 * Completion handler for bulk-out urbs: frees the urb and skb, updates
 * counters, and lifts TX flow control once the number of in-flight tx
 * urbs has fallen to fctrl_dis_thld.
 */
static void data_bridge_write_cb(struct urb *urb)
{
	struct sk_buff		*skb = urb->context;
	struct data_bridge	*dev = *(struct data_bridge **)skb->cb;
	struct bridge		*brdg = dev->brdg;
	int			pending;

	pr_debug("%s: dev:%p\n", __func__, dev);

	switch (urb->status) {
	case 0: /*success*/
		break;
	case -EPIPE:
		/* OUT endpoint stalled; defer_kevent() will clear it */
		set_bit(TX_HALT, &dev->flags);
		dev_err(&dev->udev->dev, "%s: epout halted\n", __func__);
		schedule_work(&dev->kevent);
		/* FALLTHROUGH */
	case -ESHUTDOWN:
	case -ENOENT: /* suspended */
	case -ECONNRESET: /* unplug */
	case -EOVERFLOW: /*babble error*/
		/* FALLTHROUGH */
	default:
		pr_debug_ratelimited("%s: non zero urb status = %d\n",
			__func__, urb->status);
	}

	usb_free_urb(urb);
	dev_kfree_skb_any(skb);

	pending = atomic_dec_return(&dev->pending_txurbs);

	/*flow ctrl*/
	if (brdg && fctrl_support && pending <= fctrl_dis_thld &&
		test_and_clear_bit(TX_THROTTLED, &brdg->flags)) {
		pr_debug_ratelimited("%s: disable flow ctrl: pend urbs:%u\n",
			__func__, pending);
		dev->tx_unthrottled_cnt++;
		if (brdg->ops.unthrottle_tx)
			brdg->ops.unthrottle_tx(brdg->ctx);
	}

	/* balances the usb_autopm_get_interface() in data_bridge_write() */
	usb_autopm_put_interface_async(dev->intf);
}
434
435int data_bridge_write(unsigned int id, struct sk_buff *skb)
436{
437 int result;
438 int size = skb->len;
439 int pending;
440 struct urb *txurb;
441 struct data_bridge *dev = __dev[id];
442 struct bridge *brdg;
443
444 if (!dev || !dev->brdg || !usb_get_intfdata(dev->intf))
445 return -ENODEV;
446
447 brdg = dev->brdg;
Hemant Kumarc8a3d312011-12-27 15:41:32 -0800448 if (!brdg)
449 return -ENODEV;
Hemant Kumar14401d52011-11-03 16:40:32 -0700450
451 dev_dbg(&dev->udev->dev, "%s: write (%d bytes)\n", __func__, skb->len);
452
453 result = usb_autopm_get_interface(dev->intf);
454 if (result < 0) {
455 dev_err(&dev->udev->dev, "%s: resume failure\n", __func__);
456 goto error;
457 }
458
459 txurb = usb_alloc_urb(0, GFP_KERNEL);
460 if (!txurb) {
461 dev_err(&dev->udev->dev, "%s: error allocating read urb\n",
462 __func__);
463 result = -ENOMEM;
464 goto error;
465 }
466
467 /* store dev pointer in skb */
468 *((struct data_bridge **)skb->cb) = dev;
469
470 usb_fill_bulk_urb(txurb, dev->udev, dev->bulk_out,
471 skb->data, skb->len, data_bridge_write_cb, skb);
472
473 if (test_bit(SUSPENDED, &dev->flags)) {
474 usb_anchor_urb(txurb, &dev->delayed);
475 goto free_urb;
476 }
477
478 pending = atomic_inc_return(&dev->pending_txurbs);
479 usb_anchor_urb(txurb, &dev->tx_active);
480
481 result = usb_submit_urb(txurb, GFP_KERNEL);
482 if (result < 0) {
483 usb_unanchor_urb(txurb);
484 atomic_dec(&dev->pending_txurbs);
485 dev_err(&dev->udev->dev, "%s: submit URB error %d\n",
486 __func__, result);
487 goto free_urb;
488 }
489
490 dev->to_modem++;
491 dev_dbg(&dev->udev->dev, "%s: pending_txurbs: %u\n", __func__, pending);
492
493 /* flow control: last urb submitted but return -EBUSY */
494 if (fctrl_support && pending > fctrl_en_thld) {
495 set_bit(TX_THROTTLED, &brdg->flags);
496 dev->tx_throttled_cnt++;
497 pr_debug_ratelimited("%s: enable flow ctrl pend txurbs:%u\n",
498 __func__, pending);
499 return -EBUSY;
500 }
501
502 return size;
503
504free_urb:
505 usb_free_urb(txurb);
506error:
507 dev->txurb_drp_cnt++;
508 usb_autopm_put_interface(dev->intf);
509
510 return result;
511}
512EXPORT_SYMBOL(data_bridge_write);
513
/*
 * Resubmit TX urbs that were parked on the 'delayed' anchor while the
 * interface was suspended, then clear SUSPENDED and restart RX.
 */
static int data_bridge_resume(struct data_bridge *dev)
{
	struct urb	*urb;
	int		retval;

	while ((urb = usb_get_from_anchor(&dev->delayed))) {
		usb_anchor_urb(urb, &dev->tx_active);
		atomic_inc(&dev->pending_txurbs);
		retval = usb_submit_urb(urb, GFP_ATOMIC);
		if (retval < 0) {
			atomic_dec(&dev->pending_txurbs);
			usb_unanchor_urb(urb);

			/* TODO: need to free urb data */
			usb_scuttle_anchored_urbs(&dev->delayed);
			break;
		}
		dev->to_modem++;
		/* undo the drop-count ++ made when the write was deferred
		 * on the suspended path of data_bridge_write() */
		dev->txurb_drp_cnt--;
	}

	clear_bit(SUSPENDED, &dev->flags);

	if (dev->brdg)
		queue_work(dev->wq, &dev->process_rx_w);

	return 0;
}
542
543static int bridge_resume(struct usb_interface *iface)
544{
545 int retval = 0;
546 int oldstate;
547 struct data_bridge *dev = usb_get_intfdata(iface);
548 struct bridge *brdg = dev->brdg;
549
550 oldstate = iface->dev.power.power_state.event;
551 iface->dev.power.power_state.event = PM_EVENT_ON;
552
553 retval = data_bridge_resume(dev);
554 if (!retval) {
555 if (oldstate & PM_EVENT_SUSPEND && brdg)
556 retval = ctrl_bridge_resume(brdg->ch_id);
557 }
558 return retval;
559}
560
561static int data_bridge_suspend(struct data_bridge *dev, pm_message_t message)
562{
563 if (atomic_read(&dev->pending_txurbs) &&
564 (message.event & PM_EVENT_AUTO))
565 return -EBUSY;
566
567 set_bit(SUSPENDED, &dev->flags);
568
569 usb_kill_anchored_urbs(&dev->tx_active);
570 usb_kill_anchored_urbs(&dev->rx_active);
571
572 return 0;
573}
574
575static int bridge_suspend(struct usb_interface *intf, pm_message_t message)
576{
577 int retval;
578 struct data_bridge *dev = usb_get_intfdata(intf);
579 struct bridge *brdg = dev->brdg;
580
581 retval = data_bridge_suspend(dev, message);
582 if (!retval) {
583 if (message.event & PM_EVENT_SUSPEND) {
584 if (brdg)
585 retval = ctrl_bridge_suspend(brdg->ch_id);
586 intf->dev.power.power_state.event = message.event;
587 }
588 } else {
589 dev_dbg(&dev->udev->dev, "%s: device is busy,cannot suspend\n",
590 __func__);
591 }
592 return retval;
593}
594
595static int data_bridge_probe(struct usb_interface *iface,
596 struct usb_host_endpoint *bulk_in,
597 struct usb_host_endpoint *bulk_out, int id)
598{
599 struct data_bridge *dev;
600
601 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
602 if (!dev) {
603 err("%s: unable to allocate dev\n", __func__);
604 return -ENOMEM;
605 }
606
607 dev->pdev = platform_device_alloc(data_bridge_names[id], id);
608 if (!dev->pdev) {
609 err("%s: unable to allocate platform device\n", __func__);
610 kfree(dev);
611 return -ENOMEM;
612 }
613
614 init_usb_anchor(&dev->tx_active);
615 init_usb_anchor(&dev->rx_active);
616 init_usb_anchor(&dev->delayed);
617
618 INIT_LIST_HEAD(&dev->rx_idle);
619 skb_queue_head_init(&dev->rx_done);
620
621 dev->wq = bridge_wq;
622
623 dev->udev = interface_to_usbdev(iface);
624 dev->intf = iface;
625
626 dev->bulk_in = usb_rcvbulkpipe(dev->udev,
627 bulk_in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
628
629 dev->bulk_out = usb_sndbulkpipe(dev->udev,
630 bulk_out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
631
632 usb_set_intfdata(iface, dev);
633
634 INIT_WORK(&dev->kevent, defer_kevent);
635 INIT_WORK(&dev->process_rx_w, data_bridge_process_rx);
636
637 __dev[id] = dev;
638
639 /*allocate list of rx urbs*/
640 data_bridge_prepare_rx(dev);
641
642 platform_device_add(dev->pdev);
643
644 return 0;
645}
646
647#if defined(CONFIG_DEBUG_FS)
648#define DEBUG_BUF_SIZE 1024
649static ssize_t data_bridge_read_stats(struct file *file, char __user *ubuf,
650 size_t count, loff_t *ppos)
651{
652 struct data_bridge *dev;
653 char *buf;
654 int ret;
655 int i;
656 int temp = 0;
657
658 buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
659 if (!buf)
660 return -ENOMEM;
661
662 for (i = 0; i < ch_id; i++) {
663 dev = __dev[i];
664 if (!dev)
665 continue;
666
667 temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
668 "\nName#%s dev %p\n"
669 "pending tx urbs: %u\n"
670 "tx urb drp cnt: %u\n"
671 "to host: %lu\n"
672 "to mdm: %lu\n"
673 "tx throttled cnt: %u\n"
674 "tx unthrottled cnt: %u\n"
675 "rx throttled cnt: %u\n"
676 "rx unthrottled cnt: %u\n"
677 "rx done skb qlen: %u\n"
678 "suspended: %d\n"
679 "TX_HALT: %d\n"
680 "RX_HALT: %d\n",
681 dev->pdev->name, dev,
682 atomic_read(&dev->pending_txurbs),
683 dev->txurb_drp_cnt,
684 dev->to_host,
685 dev->to_modem,
686 dev->tx_throttled_cnt,
687 dev->tx_unthrottled_cnt,
688 dev->rx_throttled_cnt,
689 dev->rx_unthrottled_cnt,
690 dev->rx_done.qlen,
691 test_bit(SUSPENDED, &dev->flags),
692 test_bit(TX_HALT, &dev->flags),
693 test_bit(RX_HALT, &dev->flags));
694
695 }
696
697 ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);
698
699 kfree(buf);
700
701 return ret;
702}
703
704static ssize_t data_bridge_reset_stats(struct file *file,
705 const char __user *buf, size_t count, loff_t *ppos)
706{
707 struct data_bridge *dev;
708 int i;
709
710 for (i = 0; i < ch_id; i++) {
711 dev = __dev[i];
712 if (!dev)
713 continue;
714
715 dev->to_host = 0;
716 dev->to_modem = 0;
717 dev->txurb_drp_cnt = 0;
718 dev->tx_throttled_cnt = 0;
719 dev->tx_unthrottled_cnt = 0;
720 dev->rx_throttled_cnt = 0;
721 dev->rx_unthrottled_cnt = 0;
722 }
723 return count;
724}
725
/* debugfs "status" file: read dumps stats, write resets them */
const struct file_operations data_stats_ops = {
	.read = data_bridge_read_stats,
	.write = data_bridge_reset_stats,
};

/* debugfs dir/file handles, created in data_bridge_debugfs_init() */
struct dentry *data_dent;
struct dentry *data_dfile;
733static void data_bridge_debugfs_init(void)
734{
735 data_dent = debugfs_create_dir("data_hsic_bridge", 0);
736 if (IS_ERR(data_dent))
737 return;
738
739 data_dfile = debugfs_create_file("status", 0644, data_dent, 0,
740 &data_stats_ops);
741 if (!data_dfile || IS_ERR(data_dfile))
742 debugfs_remove(data_dent);
743}
744
/* Tear down the debugfs entries; debugfs_remove(NULL) is a no-op. */
static void data_bridge_debugfs_exit(void)
{
	debugfs_remove(data_dfile);
	debugfs_remove(data_dent);
}
750
751#else
752static void data_bridge_debugfs_init(void) { }
753static void data_bridge_debugfs_exit(void) { }
754#endif
755
756static int __devinit
757bridge_probe(struct usb_interface *iface, const struct usb_device_id *id)
758{
759 struct usb_host_endpoint *endpoint = NULL;
760 struct usb_host_endpoint *bulk_in = NULL;
761 struct usb_host_endpoint *bulk_out = NULL;
762 struct usb_host_endpoint *int_in = NULL;
763 struct usb_device *udev;
764 int i;
765 int status = 0;
766 int numends;
Hemant Kumar67a4fd02012-01-05 15:44:36 -0800767 unsigned int iface_num;
Hemant Kumar14401d52011-11-03 16:40:32 -0700768
769 iface_num = iface->cur_altsetting->desc.bInterfaceNumber;
770
771 if (iface->num_altsetting != 1) {
772 err("%s invalid num_altsetting %u\n",
773 __func__, iface->num_altsetting);
774 return -EINVAL;
775 }
776
777 udev = interface_to_usbdev(iface);
778 usb_get_dev(udev);
779
Hemant Kumar67a4fd02012-01-05 15:44:36 -0800780 if (!test_bit(iface_num, &id->driver_info))
Hemant Kumar46f9f242011-12-15 20:20:58 -0800781 return -ENODEV;
Hemant Kumar14401d52011-11-03 16:40:32 -0700782
783 numends = iface->cur_altsetting->desc.bNumEndpoints;
784 for (i = 0; i < numends; i++) {
785 endpoint = iface->cur_altsetting->endpoint + i;
786 if (!endpoint) {
787 dev_err(&udev->dev, "%s: invalid endpoint %u\n",
788 __func__, i);
789 status = -EINVAL;
790 goto out;
791 }
792
793 if (usb_endpoint_is_bulk_in(&endpoint->desc))
794 bulk_in = endpoint;
795 else if (usb_endpoint_is_bulk_out(&endpoint->desc))
796 bulk_out = endpoint;
797 else if (usb_endpoint_is_int_in(&endpoint->desc))
798 int_in = endpoint;
799 }
800
801 if (!bulk_in || !bulk_out || !int_in) {
802 dev_err(&udev->dev, "%s: invalid endpoints\n", __func__);
803 status = -EINVAL;
804 goto out;
805 }
806
807 status = data_bridge_probe(iface, bulk_in, bulk_out, ch_id);
808 if (status < 0) {
809 dev_err(&udev->dev, "data_bridge_probe failed %d\n", status);
810 goto out;
811 }
812
813 status = ctrl_bridge_probe(iface, int_in, ch_id);
814 if (status < 0) {
815 dev_err(&udev->dev, "ctrl_bridge_probe failed %d\n", status);
816 goto free_data_bridge;
817 }
Hemant Kumar67a4fd02012-01-05 15:44:36 -0800818
Hemant Kumar14401d52011-11-03 16:40:32 -0700819 ch_id++;
820
821 return 0;
822
823free_data_bridge:
824 platform_device_del(__dev[ch_id]->pdev);
825 usb_set_intfdata(iface, NULL);
826 kfree(__dev[ch_id]);
827 __dev[ch_id] = NULL;
828out:
829 usb_put_dev(udev);
830
831 return status;
832}
833
/*
 * USB disconnect: tear down the most recently probed channel.
 *
 * NOTE(review): 'ch_id--' assumes devices disconnect in LIFO probe
 * order; with several bridge devices attached this could tear down the
 * wrong slot — confirm the intended single-modem usage.
 */
static void bridge_disconnect(struct usb_interface *intf)
{
	struct data_bridge	*dev = usb_get_intfdata(intf);
	struct list_head	*head;
	struct urb		*rx_urb;
	unsigned long		flags;

	if (!dev) {
		err("%s: data device not found\n", __func__);
		return;
	}

	ch_id--;
	ctrl_bridge_disconnect(ch_id);
	platform_device_del(dev->pdev);
	usb_set_intfdata(intf, NULL);
	__dev[ch_id] = NULL;

	/* make sure no halt-recovery or rx work runs after teardown */
	cancel_work_sync(&dev->process_rx_w);
	cancel_work_sync(&dev->kevent);

	/*free rx urbs*/
	head = &dev->rx_idle;
	/* rx_done.lock guards the rx_idle list, as elsewhere in this file */
	spin_lock_irqsave(&dev->rx_done.lock, flags);
	while (!list_empty(head)) {
		rx_urb = list_entry(head->next, struct urb, urb_list);
		list_del(&rx_urb->urb_list);
		usb_free_urb(rx_urb);
	}
	spin_unlock_irqrestore(&dev->rx_done.lock, flags);

	/* drop the reference taken in bridge_probe() */
	usb_put_dev(dev->udev);
	kfree(dev);
}
868
/*bit position represents interface number*/
#define PID9001_IFACE_MASK	0xC
#define PID9034_IFACE_MASK	0xC
#define PID9048_IFACE_MASK	0x18

/* Supported devices (VID 0x5c6 — presumably Qualcomm; confirm).
 * driver_info carries the per-PID mask of interface numbers this driver
 * binds to; bridge_probe() tests the probing interface against it. */
static const struct usb_device_id bridge_ids[] = {
	{ USB_DEVICE(0x5c6, 0x9001),
	.driver_info = PID9001_IFACE_MASK,
	},
	{ USB_DEVICE(0x5c6, 0x9034),
	.driver_info = PID9034_IFACE_MASK,
	},
	{ USB_DEVICE(0x5c6, 0x9048),
	.driver_info = PID9048_IFACE_MASK,
	},

	{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, bridge_ids);

/* driver glue; autosuspend is supported (see bridge_suspend/resume) */
static struct usb_driver bridge_driver = {
	.name = "mdm_bridge",
	.probe = bridge_probe,
	.disconnect = bridge_disconnect,
	.id_table = bridge_ids,
	.suspend = bridge_suspend,
	.resume = bridge_resume,
	.supports_autosuspend = 1,
};
898
899static int __init bridge_init(void)
900{
901 int ret;
902
903 ret = usb_register(&bridge_driver);
904 if (ret) {
905 err("%s: unable to register mdm_bridge driver", __func__);
906 return ret;
907 }
908
909 bridge_wq = create_singlethread_workqueue("mdm_bridge");
910 if (!bridge_wq) {
911 usb_deregister(&bridge_driver);
912 pr_err("%s: Unable to create workqueue:bridge\n", __func__);
913 return -ENOMEM;
914 }
915
916 data_bridge_debugfs_init();
917
918 return 0;
919}
920
921static void __exit bridge_exit(void)
922{
923 data_bridge_debugfs_exit();
924 destroy_workqueue(bridge_wq);
925 usb_deregister(&bridge_driver);
926}
927
928module_init(bridge_init);
929module_exit(bridge_exit);
930
931MODULE_DESCRIPTION("Qualcomm modem data bridge driver");
932MODULE_LICENSE("GPL v2");