/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/ratelimit.h>
#include <mach/usb_bridge.h>

#define MAX_RX_URBS			50
#define RMNET_RX_BUFSIZE		2048

#define STOP_SUBMIT_URB_LIMIT		500
#define FLOW_CTRL_EN_THRESHOLD		500
#define FLOW_CTRL_DISABLE		300
#define FLOW_CTRL_SUPPORT		1
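
/*
 * Flow-control tuning (see data_bridge_write() and data_bridge_write_cb()):
 * TX is throttled once the number of in-flight TX URBs crosses
 * FLOW_CTRL_EN_THRESHOLD and unthrottled when it drains back down to
 * FLOW_CTRL_DISABLE. RX URB submission pauses while more than
 * STOP_SUBMIT_URB_LIMIT packets sit unconsumed on the rx_done queue.
 * These limits are runtime-tunable via the module parameters below.
 */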

static const char *data_bridge_names[] = {
	"dun_data_hsic0",
	"rmnet_data_hsic0"
};

static struct workqueue_struct *bridge_wq;

static unsigned int fctrl_support = FLOW_CTRL_SUPPORT;
module_param(fctrl_support, uint, S_IRUGO | S_IWUSR);

static unsigned int fctrl_en_thld = FLOW_CTRL_EN_THRESHOLD;
module_param(fctrl_en_thld, uint, S_IRUGO | S_IWUSR);

static unsigned int fctrl_dis_thld = FLOW_CTRL_DISABLE;
module_param(fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);

unsigned int max_rx_urbs = MAX_RX_URBS;
module_param(max_rx_urbs, uint, S_IRUGO | S_IWUSR);

unsigned int stop_submit_urb_limit = STOP_SUBMIT_URB_LIMIT;
module_param(stop_submit_urb_limit, uint, S_IRUGO | S_IWUSR);

#define TX_HALT		BIT(0)
#define RX_HALT		BIT(1)
#define SUSPENDED	BIT(2)

struct data_bridge {
	struct usb_interface	*intf;
	struct usb_device	*udev;
	unsigned int		bulk_in;
	unsigned int		bulk_out;
	int			err;

	/* keep track of in-flight URBs */
	struct usb_anchor	tx_active;
	struct usb_anchor	rx_active;

	/* keep track of outgoing URBs during suspend */
	struct usb_anchor	delayed;

	struct list_head	rx_idle;
	struct sk_buff_head	rx_done;

	struct workqueue_struct	*wq;
	struct work_struct	process_rx_w;

	struct bridge		*brdg;

	/* work queue function for handling halt conditions */
	struct work_struct	kevent;

	unsigned long		flags;

	struct platform_device	*pdev;

	/* counters */
	atomic_t		pending_txurbs;
	unsigned int		txurb_drp_cnt;
	unsigned long		to_host;
	unsigned long		to_modem;
	unsigned int		tx_throttled_cnt;
	unsigned int		tx_unthrottled_cnt;
	unsigned int		rx_throttled_cnt;
	unsigned int		rx_unthrottled_cnt;
};
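
/*
 * Locking note: rx_done.lock doubles as the lock for the rx_idle list;
 * every manipulation of rx_idle in this file happens under it.
 */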

static struct data_bridge	*__dev[MAX_BRIDGE_DEVICES];

/* counter used for indexing data bridge devices */
static int	ch_id;

static int submit_rx_urb(struct data_bridge *dev, struct urb *urb,
		gfp_t flags);

static inline bool rx_halted(struct data_bridge *dev)
{
	return test_bit(RX_HALT, &dev->flags);
}

static inline bool rx_throttled(struct bridge *brdg)
{
	return test_bit(RX_THROTTLED, &brdg->flags);
}

int data_bridge_unthrottle_rx(unsigned int id)
{
	struct data_bridge	*dev;

	if (id >= MAX_BRIDGE_DEVICES)
		return -EINVAL;

	dev = __dev[id];
	if (!dev || !dev->brdg)
		return -ENODEV;

	dev->rx_unthrottled_cnt++;
	queue_work(dev->wq, &dev->process_rx_w);

	return 0;
}
EXPORT_SYMBOL(data_bridge_unthrottle_rx);
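
/*
 * RX path: completed bulk-in URBs queue their skbs on rx_done; this
 * worker drains that queue to the client via ops.send_pkt(), backing
 * off while the client reports -EBUSY (RX_THROTTLED), then resubmits
 * idle URBs as long as the rx_done backlog stays below
 * stop_submit_urb_limit.
 */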

static void data_bridge_process_rx(struct work_struct *work)
{
	int			retval;
	unsigned long		flags;
	struct urb		*rx_idle;
	struct sk_buff		*skb;
	struct data_bridge	*dev =
		container_of(work, struct data_bridge, process_rx_w);

	struct bridge		*brdg = dev->brdg;

	if (!brdg || !brdg->ops.send_pkt || rx_halted(dev))
		return;

	while (!rx_throttled(brdg) && (skb = skb_dequeue(&dev->rx_done))) {
		dev->to_host++;
		/* hand off sk_buff to the client; it is responsible for freeing it */
		retval = brdg->ops.send_pkt(brdg->ctx, skb, skb->len);
		if (retval == -ENOTCONN || retval == -EINVAL) {
			return;
		} else if (retval == -EBUSY) {
			dev->rx_throttled_cnt++;
			break;
		}
	}

	spin_lock_irqsave(&dev->rx_done.lock, flags);
	while (!list_empty(&dev->rx_idle)) {
		if (dev->rx_done.qlen > stop_submit_urb_limit)
			break;

		rx_idle = list_first_entry(&dev->rx_idle, struct urb, urb_list);
		list_del(&rx_idle->urb_list);
		spin_unlock_irqrestore(&dev->rx_done.lock, flags);
		retval = submit_rx_urb(dev, rx_idle, GFP_KERNEL);
		spin_lock_irqsave(&dev->rx_done.lock, flags);
		if (retval) {
			list_add_tail(&rx_idle->urb_list, &dev->rx_idle);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->rx_done.lock, flags);
}

static void data_bridge_read_cb(struct urb *urb)
{
	struct bridge		*brdg;
	struct sk_buff		*skb = urb->context;
	struct data_bridge	*dev = *(struct data_bridge **)skb->cb;
	bool			queue = false;

	brdg = dev->brdg;

	skb_put(skb, urb->actual_length);

	switch (urb->status) {
	case 0: /* success */
		queue = true;
		spin_lock(&dev->rx_done.lock);
		__skb_queue_tail(&dev->rx_done, skb);
		spin_unlock(&dev->rx_done.lock);
		break;

	/* do not resubmit */
	case -EPIPE:
		set_bit(RX_HALT, &dev->flags);
		dev_err(&dev->udev->dev, "%s: rx endpoint halted\n", __func__);
		schedule_work(&dev->kevent);
		/* FALLTHROUGH */
	case -ESHUTDOWN:
	case -ENOENT: /* suspended */
	case -ECONNRESET: /* unplug */
	case -EPROTO:
		dev_kfree_skb_any(skb);
		break;

	/* resubmit */
	case -EOVERFLOW: /* babble error */
	default:
		queue = true;
		dev_kfree_skb_any(skb);
		pr_debug_ratelimited("%s: non-zero urb status = %d\n",
			__func__, urb->status);
		break;
	}

	spin_lock(&dev->rx_done.lock);
	list_add_tail(&urb->urb_list, &dev->rx_idle);
	spin_unlock(&dev->rx_done.lock);

	if (queue)
		queue_work(dev->wq, &dev->process_rx_w);
}

static int submit_rx_urb(struct data_bridge *dev, struct urb *rx_urb,
	gfp_t flags)
{
	struct sk_buff	*skb;
	int		retval = -EINVAL;

	skb = alloc_skb(RMNET_RX_BUFSIZE, flags);
	if (!skb)
		return -ENOMEM;

	*((struct data_bridge **)skb->cb) = dev;

	usb_fill_bulk_urb(rx_urb, dev->udev, dev->bulk_in,
			skb->data, RMNET_RX_BUFSIZE,
			data_bridge_read_cb, skb);

	if (test_bit(SUSPENDED, &dev->flags))
		goto suspended;

	usb_anchor_urb(rx_urb, &dev->rx_active);
	retval = usb_submit_urb(rx_urb, flags);
	if (retval)
		goto fail;

	return 0;
fail:
	usb_unanchor_urb(rx_urb);
suspended:
	dev_kfree_skb_any(skb);

	return retval;
}

static int data_bridge_prepare_rx(struct data_bridge *dev)
{
	int		i;
	struct urb	*rx_urb;

	for (i = 0; i < max_rx_urbs; i++) {
		rx_urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!rx_urb)
			return -ENOMEM;

		list_add_tail(&rx_urb->urb_list, &dev->rx_idle);
	}
	return 0;
}

int data_bridge_open(struct bridge *brdg)
{
	struct data_bridge	*dev;

	if (!brdg) {
		err("bridge is null\n");
		return -EINVAL;
	}

	if (brdg->ch_id >= MAX_BRIDGE_DEVICES)
		return -EINVAL;

	dev = __dev[brdg->ch_id];
	if (!dev) {
		err("dev is null\n");
		return -ENODEV;
	}

	dev_dbg(&dev->udev->dev, "%s: dev:%p\n", __func__, dev);

	dev->brdg = brdg;
	dev->err = 0;
	atomic_set(&dev->pending_txurbs, 0);
	dev->to_host = 0;
	dev->to_modem = 0;
	dev->txurb_drp_cnt = 0;
	dev->tx_throttled_cnt = 0;
	dev->tx_unthrottled_cnt = 0;
	dev->rx_throttled_cnt = 0;
	dev->rx_unthrottled_cnt = 0;

	queue_work(dev->wq, &dev->process_rx_w);

	return 0;
}
EXPORT_SYMBOL(data_bridge_open);

void data_bridge_close(unsigned int id)
{
	struct data_bridge	*dev;
	struct sk_buff		*skb;
	unsigned long		flags;

	if (id >= MAX_BRIDGE_DEVICES)
		return;

	dev = __dev[id];
	if (!dev || !dev->brdg)
		return;

	dev_dbg(&dev->udev->dev, "%s:\n", __func__);

	usb_unlink_anchored_urbs(&dev->tx_active);
	usb_unlink_anchored_urbs(&dev->rx_active);
	usb_unlink_anchored_urbs(&dev->delayed);

	spin_lock_irqsave(&dev->rx_done.lock, flags);
	while ((skb = __skb_dequeue(&dev->rx_done)))
		dev_kfree_skb_any(skb);
	spin_unlock_irqrestore(&dev->rx_done.lock, flags);

	dev->brdg = NULL;
}
EXPORT_SYMBOL(data_bridge_close);

static void defer_kevent(struct work_struct *work)
{
	int			status;
	struct data_bridge	*dev =
		container_of(work, struct data_bridge, kevent);

	if (!dev)
		return;

	if (test_bit(TX_HALT, &dev->flags)) {
		usb_unlink_anchored_urbs(&dev->tx_active);

		status = usb_autopm_get_interface(dev->intf);
		if (status < 0) {
			dev_err(&dev->udev->dev,
				"can't acquire interface, status %d\n", status);
			return;
		}

		status = usb_clear_halt(dev->udev, dev->bulk_out);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 && status != -EPIPE && status != -ESHUTDOWN)
			dev_err(&dev->udev->dev,
				"can't clear tx halt, status %d\n", status);
		else
			clear_bit(TX_HALT, &dev->flags);
	}

	if (test_bit(RX_HALT, &dev->flags)) {
		usb_unlink_anchored_urbs(&dev->rx_active);

		status = usb_autopm_get_interface(dev->intf);
		if (status < 0) {
			dev_err(&dev->udev->dev,
				"can't acquire interface, status %d\n", status);
			return;
		}

		status = usb_clear_halt(dev->udev, dev->bulk_in);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 && status != -EPIPE && status != -ESHUTDOWN)
			dev_err(&dev->udev->dev,
				"can't clear rx halt, status %d\n", status);
		else {
			clear_bit(RX_HALT, &dev->flags);
			if (dev->brdg)
				queue_work(dev->wq, &dev->process_rx_w);
		}
	}
}

static void data_bridge_write_cb(struct urb *urb)
{
	struct sk_buff		*skb = urb->context;
	struct data_bridge	*dev = *(struct data_bridge **)skb->cb;
	struct bridge		*brdg = dev->brdg;
	int			pending;

	pr_debug("%s: dev:%p\n", __func__, dev);

	switch (urb->status) {
	case 0: /* success */
		break;
	case -EPROTO:
		dev->err = -EPROTO;
		break;
	case -EPIPE:
		set_bit(TX_HALT, &dev->flags);
		dev_err(&dev->udev->dev, "%s: epout halted\n", __func__);
		schedule_work(&dev->kevent);
		/* FALLTHROUGH */
	case -ESHUTDOWN:
	case -ENOENT: /* suspended */
	case -ECONNRESET: /* unplug */
	case -EOVERFLOW: /* babble error */
		/* FALLTHROUGH */
	default:
		pr_debug_ratelimited("%s: non-zero urb status = %d\n",
			__func__, urb->status);
	}

	usb_free_urb(urb);
	dev_kfree_skb_any(skb);

	pending = atomic_dec_return(&dev->pending_txurbs);

	/* flow control: unthrottle TX once enough URBs have drained */
	if (brdg && fctrl_support && pending <= fctrl_dis_thld &&
		test_and_clear_bit(TX_THROTTLED, &brdg->flags)) {
		pr_debug_ratelimited("%s: disable flow ctrl: pend urbs:%u\n",
			__func__, pending);
		dev->tx_unthrottled_cnt++;
		if (brdg->ops.unthrottle_tx)
			brdg->ops.unthrottle_tx(brdg->ctx);
	}

	usb_autopm_put_interface_async(dev->intf);
}

int data_bridge_write(unsigned int id, struct sk_buff *skb)
{
	int			result;
	int			size = skb->len;
	int			pending;
	struct urb		*txurb;
	struct data_bridge	*dev = __dev[id];
	struct bridge		*brdg;

	if (!dev || !dev->brdg || dev->err || !usb_get_intfdata(dev->intf))
		return -ENODEV;

	brdg = dev->brdg;
	if (!brdg)
		return -ENODEV;

	dev_dbg(&dev->udev->dev, "%s: write (%d bytes)\n", __func__, skb->len);

	result = usb_autopm_get_interface(dev->intf);
	if (result < 0) {
		dev_err(&dev->udev->dev, "%s: resume failure\n", __func__);
		goto error;
	}

	txurb = usb_alloc_urb(0, GFP_KERNEL);
	if (!txurb) {
		dev_err(&dev->udev->dev, "%s: error allocating tx urb\n",
			__func__);
		result = -ENOMEM;
		goto error;
	}

	/* store dev pointer in skb */
	*((struct data_bridge **)skb->cb) = dev;

	usb_fill_bulk_urb(txurb, dev->udev, dev->bulk_out,
			skb->data, skb->len, data_bridge_write_cb, skb);

	if (test_bit(SUSPENDED, &dev->flags)) {
		usb_anchor_urb(txurb, &dev->delayed);
		goto free_urb;
	}

	pending = atomic_inc_return(&dev->pending_txurbs);
	usb_anchor_urb(txurb, &dev->tx_active);

	result = usb_submit_urb(txurb, GFP_KERNEL);
	if (result < 0) {
		usb_unanchor_urb(txurb);
		atomic_dec(&dev->pending_txurbs);
		dev_err(&dev->udev->dev, "%s: submit URB error %d\n",
			__func__, result);
		goto free_urb;
	}

	dev->to_modem++;
	dev_dbg(&dev->udev->dev, "%s: pending_txurbs: %u\n", __func__, pending);

	/*
	 * flow control: the URB was submitted, but return -EBUSY so the
	 * client throttles further writes until unthrottle_tx() is called
	 */
	if (fctrl_support && pending > fctrl_en_thld) {
		set_bit(TX_THROTTLED, &brdg->flags);
		dev->tx_throttled_cnt++;
		pr_debug_ratelimited("%s: enable flow ctrl pend txurbs:%u\n",
			__func__, pending);
		return -EBUSY;
	}

	return size;

free_urb:
	usb_free_urb(txurb);
error:
	dev->txurb_drp_cnt++;
	usb_autopm_put_interface(dev->intf);

	return result;
}
EXPORT_SYMBOL(data_bridge_write);
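
/*
 * A minimal client-side sketch (hypothetical names; the real struct
 * bridge definition lives in mach/usb_bridge.h):
 *
 *	static struct bridge my_brdg = {
 *		.ch_id = 0,
 *		.ctx   = &my_ctx,
 *		.ops   = {
 *			.send_pkt      = my_send_pkt,   -- client frees the skb
 *			.unthrottle_tx = my_unthrottle, -- resume after -EBUSY
 *		},
 *	};
 *
 *	data_bridge_open(&my_brdg);
 *	ret = data_bridge_write(my_brdg.ch_id, skb);
 *
 * data_bridge_write() returns the number of bytes queued on success and
 * -EBUSY once TX is throttled; writes may resume after unthrottle_tx().
 */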

static int data_bridge_resume(struct data_bridge *dev)
{
	struct urb	*urb;
	int		retval;

	while ((urb = usb_get_from_anchor(&dev->delayed))) {
		usb_anchor_urb(urb, &dev->tx_active);
		atomic_inc(&dev->pending_txurbs);
		retval = usb_submit_urb(urb, GFP_ATOMIC);
		if (retval < 0) {
			atomic_dec(&dev->pending_txurbs);
			usb_unanchor_urb(urb);

			/* TODO: need to free urb data */
			usb_scuttle_anchored_urbs(&dev->delayed);
			break;
		}
		dev->to_modem++;
		dev->txurb_drp_cnt--;
	}

	clear_bit(SUSPENDED, &dev->flags);

	if (dev->brdg)
		queue_work(dev->wq, &dev->process_rx_w);

	return 0;
}

static int bridge_resume(struct usb_interface *iface)
{
	int			retval = 0;
	int			oldstate;
	struct data_bridge	*dev = usb_get_intfdata(iface);
	struct bridge		*brdg = dev->brdg;

	oldstate = iface->dev.power.power_state.event;
	iface->dev.power.power_state.event = PM_EVENT_ON;

	retval = data_bridge_resume(dev);
	if (!retval) {
		if (oldstate & PM_EVENT_SUSPEND && brdg)
			retval = ctrl_bridge_resume(brdg->ch_id);
	}
	return retval;
}

static int data_bridge_suspend(struct data_bridge *dev, pm_message_t message)
{
	if (atomic_read(&dev->pending_txurbs) &&
		(message.event & PM_EVENT_AUTO))
		return -EBUSY;

	set_bit(SUSPENDED, &dev->flags);

	usb_kill_anchored_urbs(&dev->tx_active);
	usb_kill_anchored_urbs(&dev->rx_active);

	return 0;
}

static int bridge_suspend(struct usb_interface *intf, pm_message_t message)
{
	int			retval;
	struct data_bridge	*dev = usb_get_intfdata(intf);
	struct bridge		*brdg = dev->brdg;

	retval = data_bridge_suspend(dev, message);
	if (!retval) {
		if (message.event & PM_EVENT_SUSPEND) {
			if (brdg)
				retval = ctrl_bridge_suspend(brdg->ch_id);
			intf->dev.power.power_state.event = message.event;
		}
	} else {
		dev_dbg(&dev->udev->dev, "%s: device is busy, cannot suspend\n",
			__func__);
	}
	return retval;
}

static int data_bridge_probe(struct usb_interface *iface,
		struct usb_host_endpoint *bulk_in,
		struct usb_host_endpoint *bulk_out, int id)
{
	struct data_bridge	*dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		err("%s: unable to allocate dev\n", __func__);
		return -ENOMEM;
	}

	dev->pdev = platform_device_alloc(data_bridge_names[id], id);
	if (!dev->pdev) {
		err("%s: unable to allocate platform device\n", __func__);
		kfree(dev);
		return -ENOMEM;
	}

	init_usb_anchor(&dev->tx_active);
	init_usb_anchor(&dev->rx_active);
	init_usb_anchor(&dev->delayed);

	INIT_LIST_HEAD(&dev->rx_idle);
	skb_queue_head_init(&dev->rx_done);

	dev->wq = bridge_wq;

	dev->udev = interface_to_usbdev(iface);
	dev->intf = iface;

	dev->bulk_in = usb_rcvbulkpipe(dev->udev,
		bulk_in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);

	dev->bulk_out = usb_sndbulkpipe(dev->udev,
		bulk_out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);

	usb_set_intfdata(iface, dev);

	INIT_WORK(&dev->kevent, defer_kevent);
	INIT_WORK(&dev->process_rx_w, data_bridge_process_rx);

	__dev[id] = dev;

	/* allocate list of rx urbs */
	data_bridge_prepare_rx(dev);

	platform_device_add(dev->pdev);

	return 0;
}

#if defined(CONFIG_DEBUG_FS)
#define DEBUG_BUF_SIZE	1024
static ssize_t data_bridge_read_stats(struct file *file, char __user *ubuf,
		size_t count, loff_t *ppos)
{
	struct data_bridge	*dev;
	char			*buf;
	int			ret;
	int			i;
	int			temp = 0;

	buf = kzalloc(DEBUG_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < ch_id; i++) {
		dev = __dev[i];
		if (!dev)
			continue;

		temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
				"\nName#%s dev %p\n"
				"pending tx urbs: %u\n"
				"tx urb drp cnt: %u\n"
				"to host: %lu\n"
				"to mdm: %lu\n"
				"tx throttled cnt: %u\n"
				"tx unthrottled cnt: %u\n"
				"rx throttled cnt: %u\n"
				"rx unthrottled cnt: %u\n"
				"rx done skb qlen: %u\n"
				"dev err: %d\n"
				"suspended: %d\n"
				"TX_HALT: %d\n"
				"RX_HALT: %d\n",
				dev->pdev->name, dev,
				atomic_read(&dev->pending_txurbs),
				dev->txurb_drp_cnt,
				dev->to_host,
				dev->to_modem,
				dev->tx_throttled_cnt,
				dev->tx_unthrottled_cnt,
				dev->rx_throttled_cnt,
				dev->rx_unthrottled_cnt,
				dev->rx_done.qlen,
				dev->err,
				test_bit(SUSPENDED, &dev->flags),
				test_bit(TX_HALT, &dev->flags),
				test_bit(RX_HALT, &dev->flags));
	}

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);

	kfree(buf);

	return ret;
}

static ssize_t data_bridge_reset_stats(struct file *file,
	const char __user *buf, size_t count, loff_t *ppos)
{
	struct data_bridge	*dev;
	int			i;

	for (i = 0; i < ch_id; i++) {
		dev = __dev[i];
		if (!dev)
			continue;

		dev->to_host = 0;
		dev->to_modem = 0;
		dev->txurb_drp_cnt = 0;
		dev->tx_throttled_cnt = 0;
		dev->tx_unthrottled_cnt = 0;
		dev->rx_throttled_cnt = 0;
		dev->rx_unthrottled_cnt = 0;
	}
	return count;
}

static const struct file_operations data_stats_ops = {
	.read = data_bridge_read_stats,
	.write = data_bridge_reset_stats,
};

static struct dentry	*data_dent;
static struct dentry	*data_dfile;

static void data_bridge_debugfs_init(void)
{
	data_dent = debugfs_create_dir("data_hsic_bridge", 0);
	if (IS_ERR(data_dent))
		return;

	data_dfile = debugfs_create_file("status", 0644, data_dent, 0,
				&data_stats_ops);
	if (!data_dfile || IS_ERR(data_dfile))
		debugfs_remove(data_dent);
}

static void data_bridge_debugfs_exit(void)
{
	debugfs_remove(data_dfile);
	debugfs_remove(data_dent);
}

#else
static void data_bridge_debugfs_init(void) { }
static void data_bridge_debugfs_exit(void) { }
#endif

static int __devinit
bridge_probe(struct usb_interface *iface, const struct usb_device_id *id)
{
	struct usb_host_endpoint	*endpoint = NULL;
	struct usb_host_endpoint	*bulk_in = NULL;
	struct usb_host_endpoint	*bulk_out = NULL;
	struct usb_host_endpoint	*int_in = NULL;
	struct usb_device		*udev;
	int				i;
	int				status = 0;
	int				numends;
	unsigned int			iface_num;

	iface_num = iface->cur_altsetting->desc.bInterfaceNumber;

	if (iface->num_altsetting != 1) {
		err("%s invalid num_altsetting %u\n",
			__func__, iface->num_altsetting);
		return -EINVAL;
	}

	udev = interface_to_usbdev(iface);
	usb_get_dev(udev);

	if (!test_bit(iface_num, &id->driver_info)) {
		/* drop the device reference taken above */
		status = -ENODEV;
		goto out;
	}

	numends = iface->cur_altsetting->desc.bNumEndpoints;
	for (i = 0; i < numends; i++) {
		endpoint = iface->cur_altsetting->endpoint + i;
		if (!endpoint) {
			dev_err(&udev->dev, "%s: invalid endpoint %u\n",
				__func__, i);
			status = -EINVAL;
			goto out;
		}

		if (usb_endpoint_is_bulk_in(&endpoint->desc))
			bulk_in = endpoint;
		else if (usb_endpoint_is_bulk_out(&endpoint->desc))
			bulk_out = endpoint;
		else if (usb_endpoint_is_int_in(&endpoint->desc))
			int_in = endpoint;
	}

	if (!bulk_in || !bulk_out || !int_in) {
		dev_err(&udev->dev, "%s: invalid endpoints\n", __func__);
		status = -EINVAL;
		goto out;
	}

	status = data_bridge_probe(iface, bulk_in, bulk_out, ch_id);
	if (status < 0) {
		dev_err(&udev->dev, "data_bridge_probe failed %d\n", status);
		goto out;
	}

	status = ctrl_bridge_probe(iface, int_in, ch_id);
	if (status < 0) {
		dev_err(&udev->dev, "ctrl_bridge_probe failed %d\n", status);
		goto free_data_bridge;
	}

	ch_id++;

	return 0;

free_data_bridge:
	platform_device_del(__dev[ch_id]->pdev);
	usb_set_intfdata(iface, NULL);
	kfree(__dev[ch_id]);
	__dev[ch_id] = NULL;
out:
	usb_put_dev(udev);

	return status;
}

static void bridge_disconnect(struct usb_interface *intf)
{
	struct data_bridge	*dev = usb_get_intfdata(intf);
	struct list_head	*head;
	struct urb		*rx_urb;
	unsigned long		flags;

	if (!dev) {
		err("%s: data device not found\n", __func__);
		return;
	}

	ch_id--;
	ctrl_bridge_disconnect(ch_id);
	platform_device_del(dev->pdev);
	usb_set_intfdata(intf, NULL);
	__dev[ch_id] = NULL;

	cancel_work_sync(&dev->process_rx_w);
	cancel_work_sync(&dev->kevent);

	/* free rx urbs */
	head = &dev->rx_idle;
	spin_lock_irqsave(&dev->rx_done.lock, flags);
	while (!list_empty(head)) {
		rx_urb = list_entry(head->next, struct urb, urb_list);
		list_del(&rx_urb->urb_list);
		usb_free_urb(rx_urb);
	}
	spin_unlock_irqrestore(&dev->rx_done.lock, flags);

	usb_put_dev(dev->udev);
	kfree(dev);
}

/* bit position represents interface number */
#define PID9001_IFACE_MASK	0xC
#define PID9034_IFACE_MASK	0xC
#define PID9048_IFACE_MASK	0x18

static const struct usb_device_id bridge_ids[] = {
	{ USB_DEVICE(0x5c6, 0x9001),
	.driver_info = PID9001_IFACE_MASK,
	},
	{ USB_DEVICE(0x5c6, 0x9034),
	.driver_info = PID9034_IFACE_MASK,
	},
	{ USB_DEVICE(0x5c6, 0x9048),
	.driver_info = PID9048_IFACE_MASK,
	},

	{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, bridge_ids);

static struct usb_driver bridge_driver = {
	.name = "mdm_bridge",
	.probe = bridge_probe,
	.disconnect = bridge_disconnect,
	.id_table = bridge_ids,
	.suspend = bridge_suspend,
	.resume = bridge_resume,
	.supports_autosuspend = 1,
};

static int __init bridge_init(void)
{
	int	ret;

	/*
	 * bring up the workqueue before registering the driver so that a
	 * probe racing with init never sees a NULL bridge_wq
	 */
	bridge_wq = create_singlethread_workqueue("mdm_bridge");
	if (!bridge_wq) {
		pr_err("%s: unable to create workqueue: bridge\n", __func__);
		return -ENOMEM;
	}

	ret = usb_register(&bridge_driver);
	if (ret) {
		destroy_workqueue(bridge_wq);
		err("%s: unable to register mdm_bridge driver", __func__);
		return ret;
	}

	data_bridge_debugfs_init();

	return 0;
}

static void __exit bridge_exit(void)
{
	data_bridge_debugfs_exit();
	destroy_workqueue(bridge_wq);
	usb_deregister(&bridge_driver);
}

module_init(bridge_init);
module_exit(bridge_exit);

MODULE_DESCRIPTION("Qualcomm modem data bridge driver");
MODULE_LICENSE("GPL v2");