/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/ratelimit.h>
#include <mach/usb_bridge.h>

#define MAX_RX_URBS			50
#define RMNET_RX_BUFSIZE		2048

#define STOP_SUBMIT_URB_LIMIT		500
#define FLOW_CTRL_EN_THRESHOLD		500
#define FLOW_CTRL_DISABLE		300
#define FLOW_CTRL_SUPPORT		1

static const char *data_bridge_names[] = {
	"dun_data_hsic0",
	"rmnet_data_hsic0"
};

static struct workqueue_struct *bridge_wq;

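/*
 * TX flow control tunables:
 * - fctrl_support enables/disables flow control toward the client;
 * - fctrl_en_thld: pending tx urbs above this throttle the client;
 * - fctrl_dis_thld: pending tx urbs at or below this unthrottle it.
 */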
static unsigned int fctrl_support = FLOW_CTRL_SUPPORT;
module_param(fctrl_support, uint, S_IRUGO | S_IWUSR);

static unsigned int fctrl_en_thld = FLOW_CTRL_EN_THRESHOLD;
module_param(fctrl_en_thld, uint, S_IRUGO | S_IWUSR);

static unsigned int fctrl_dis_thld = FLOW_CTRL_DISABLE;
module_param(fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);

unsigned int max_rx_urbs = MAX_RX_URBS;
module_param(max_rx_urbs, uint, S_IRUGO | S_IWUSR);

unsigned int stop_submit_urb_limit = STOP_SUBMIT_URB_LIMIT;
module_param(stop_submit_urb_limit, uint, S_IRUGO | S_IWUSR);

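/*
 * Interrupt mitigation: only one in every tx_urb_mult tx urbs requests a
 * completion interrupt; the rest are submitted with URB_NO_INTERRUPT (see
 * data_bridge_write()).
 */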
static unsigned int tx_urb_mult = 20;
module_param(tx_urb_mult, uint, S_IRUGO | S_IWUSR);

#define TX_HALT		BIT(0)
#define RX_HALT		BIT(1)
#define SUSPENDED	BIT(2)

struct data_bridge {
	struct usb_interface *intf;
	struct usb_device *udev;
	unsigned int bulk_in;
	unsigned int bulk_out;
	int err;

	/* keep track of in-flight URBs */
	struct usb_anchor tx_active;
	struct usb_anchor rx_active;

	/* keep track of outgoing URBs during suspend */
	struct usb_anchor delayed;

	struct list_head rx_idle;
	struct sk_buff_head rx_done;

	struct workqueue_struct *wq;
	struct work_struct process_rx_w;

	struct bridge *brdg;

	/* work queue function for handling halt conditions */
	struct work_struct kevent;

	unsigned long flags;

	struct platform_device *pdev;

	/* counters */
	atomic_t pending_txurbs;
	unsigned int txurb_drp_cnt;
	unsigned long to_host;
	unsigned long to_modem;
	unsigned int tx_throttled_cnt;
	unsigned int tx_unthrottled_cnt;
	unsigned int rx_throttled_cnt;
	unsigned int rx_unthrottled_cnt;
};

static struct data_bridge *__dev[MAX_BRIDGE_DEVICES];

/* counter used for indexing data bridge devices */
static int ch_id;

static int submit_rx_urb(struct data_bridge *dev, struct urb *urb,
		gfp_t flags);

static inline bool rx_halted(struct data_bridge *dev)
{
	return test_bit(RX_HALT, &dev->flags);
}

static inline bool rx_throttled(struct bridge *brdg)
{
	return test_bit(RX_THROTTLED, &brdg->flags);
}

int data_bridge_unthrottle_rx(unsigned int id)
{
	struct data_bridge *dev;

	if (id >= MAX_BRIDGE_DEVICES)
		return -EINVAL;

	dev = __dev[id];
	if (!dev || !dev->brdg)
		return -ENODEV;

	dev->rx_unthrottled_cnt++;
	queue_work(dev->wq, &dev->process_rx_w);

	return 0;
}
EXPORT_SYMBOL(data_bridge_unthrottle_rx);

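/*
 * Work function: drain rx_done toward the client until it throttles us,
 * then refill the bulk IN pipe from the rx_idle urb pool unless the
 * backlog has grown past stop_submit_urb_limit.
 */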
static void data_bridge_process_rx(struct work_struct *work)
{
	int retval;
	unsigned long flags;
	struct urb *rx_idle;
	struct sk_buff *skb;
	struct data_bridge *dev =
		container_of(work, struct data_bridge, process_rx_w);

	struct bridge *brdg = dev->brdg;

	if (!brdg || !brdg->ops.send_pkt || rx_halted(dev))
		return;

	while (!rx_throttled(brdg) && (skb = skb_dequeue(&dev->rx_done))) {
		dev->to_host++;
		/* hand off sk_buff to client, they'll need to free it */
		retval = brdg->ops.send_pkt(brdg->ctx, skb, skb->len);
		if (retval == -ENOTCONN || retval == -EINVAL) {
			return;
		} else if (retval == -EBUSY) {
			dev->rx_throttled_cnt++;
			break;
		}
	}

	spin_lock_irqsave(&dev->rx_done.lock, flags);
	while (!list_empty(&dev->rx_idle)) {
		if (dev->rx_done.qlen > stop_submit_urb_limit)
			break;

		rx_idle = list_first_entry(&dev->rx_idle, struct urb, urb_list);
		list_del(&rx_idle->urb_list);
		spin_unlock_irqrestore(&dev->rx_done.lock, flags);
		retval = submit_rx_urb(dev, rx_idle, GFP_KERNEL);
		spin_lock_irqsave(&dev->rx_done.lock, flags);
		if (retval) {
			list_add_tail(&rx_idle->urb_list, &dev->rx_idle);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->rx_done.lock, flags);
}

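/*
 * Bulk IN completion handler. Good skbs are queued on rx_done and the urb
 * is parked back on rx_idle; the actual hand-off to the client happens in
 * data_bridge_process_rx() on the bridge workqueue.
 */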
static void data_bridge_read_cb(struct urb *urb)
{
	struct bridge *brdg;
	struct sk_buff *skb = urb->context;
	struct data_bridge *dev = *(struct data_bridge **)skb->cb;
	bool queue = false;

	brdg = dev->brdg;

	skb_put(skb, urb->actual_length);

	switch (urb->status) {
	case 0: /* success */
		queue = true;
		spin_lock(&dev->rx_done.lock);
		__skb_queue_tail(&dev->rx_done, skb);
		spin_unlock(&dev->rx_done.lock);
		break;

	/* do not resubmit */
	case -EPIPE:
		set_bit(RX_HALT, &dev->flags);
		dev_err(&dev->udev->dev, "%s: epin halted\n", __func__);
		schedule_work(&dev->kevent);
		/* FALLTHROUGH */
	case -ESHUTDOWN:
	case -ENOENT: /* suspended */
	case -ECONNRESET: /* unplug */
	case -EPROTO:
		dev_kfree_skb_any(skb);
		break;

	/* resubmit */
	case -EOVERFLOW: /* babble error */
	default:
		queue = true;
		dev_kfree_skb_any(skb);
		pr_debug_ratelimited("%s: non zero urb status = %d\n",
			__func__, urb->status);
		break;
	}

	spin_lock(&dev->rx_done.lock);
	list_add_tail(&urb->urb_list, &dev->rx_idle);
	spin_unlock(&dev->rx_done.lock);

	if (queue)
		queue_work(dev->wq, &dev->process_rx_w);
}

static int submit_rx_urb(struct data_bridge *dev, struct urb *rx_urb,
	gfp_t flags)
{
	struct sk_buff *skb;
	int retval = -EINVAL;

	skb = alloc_skb(RMNET_RX_BUFSIZE, flags);
	if (!skb)
		return -ENOMEM;

	*((struct data_bridge **)skb->cb) = dev;

	usb_fill_bulk_urb(rx_urb, dev->udev, dev->bulk_in,
			skb->data, RMNET_RX_BUFSIZE,
			data_bridge_read_cb, skb);

	if (test_bit(SUSPENDED, &dev->flags))
		goto suspended;

	usb_anchor_urb(rx_urb, &dev->rx_active);
	retval = usb_submit_urb(rx_urb, flags);
	if (retval)
		goto fail;

	return 0;
fail:
	usb_unanchor_urb(rx_urb);
suspended:
	dev_kfree_skb_any(skb);

	return retval;
}

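/*
 * Preallocate the pool of rx urbs; they live on rx_idle for the lifetime
 * of the device and are released in bridge_disconnect().
 */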
static int data_bridge_prepare_rx(struct data_bridge *dev)
{
	int i;
	struct urb *rx_urb;

	for (i = 0; i < max_rx_urbs; i++) {
		rx_urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!rx_urb)
			return -ENOMEM;

		list_add_tail(&rx_urb->urb_list, &dev->rx_idle);
	}
	return 0;
}

int data_bridge_open(struct bridge *brdg)
{
	struct data_bridge *dev;

	if (!brdg) {
		err("bridge is null\n");
		return -EINVAL;
	}

	if (brdg->ch_id >= MAX_BRIDGE_DEVICES)
		return -EINVAL;

	dev = __dev[brdg->ch_id];
	if (!dev) {
		err("dev is null\n");
		return -ENODEV;
	}

	dev_dbg(&dev->udev->dev, "%s: dev:%p\n", __func__, dev);

	dev->brdg = brdg;
	dev->err = 0;
	atomic_set(&dev->pending_txurbs, 0);
	dev->to_host = 0;
	dev->to_modem = 0;
	dev->txurb_drp_cnt = 0;
	dev->tx_throttled_cnt = 0;
	dev->tx_unthrottled_cnt = 0;
	dev->rx_throttled_cnt = 0;
	dev->rx_unthrottled_cnt = 0;

	queue_work(dev->wq, &dev->process_rx_w);

	return 0;
}
EXPORT_SYMBOL(data_bridge_open);

void data_bridge_close(unsigned int id)
{
	struct data_bridge *dev;
	struct sk_buff *skb;
	unsigned long flags;

	if (id >= MAX_BRIDGE_DEVICES)
		return;

	dev = __dev[id];
	if (!dev || !dev->brdg)
		return;

	dev_dbg(&dev->udev->dev, "%s:\n", __func__);

	usb_unlink_anchored_urbs(&dev->tx_active);
	usb_unlink_anchored_urbs(&dev->rx_active);
	usb_unlink_anchored_urbs(&dev->delayed);

	spin_lock_irqsave(&dev->rx_done.lock, flags);
	while ((skb = __skb_dequeue(&dev->rx_done)))
		dev_kfree_skb_any(skb);
	spin_unlock_irqrestore(&dev->rx_done.lock, flags);

	dev->brdg = NULL;
}
EXPORT_SYMBOL(data_bridge_close);

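/*
 * Halt recovery runs from the workqueue because usb_autopm_get_interface()
 * and usb_clear_halt() may sleep; completion handlers only set the
 * TX_HALT/RX_HALT bits and schedule this work.
 */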
static void defer_kevent(struct work_struct *work)
{
	int status;
	struct data_bridge *dev =
		container_of(work, struct data_bridge, kevent);

	if (!dev)
		return;

	if (test_bit(TX_HALT, &dev->flags)) {
		usb_unlink_anchored_urbs(&dev->tx_active);

		status = usb_autopm_get_interface(dev->intf);
		if (status < 0) {
			dev_err(&dev->udev->dev,
				"can't acquire interface, status %d\n", status);
			return;
		}

		status = usb_clear_halt(dev->udev, dev->bulk_out);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 && status != -EPIPE && status != -ESHUTDOWN)
			dev_err(&dev->udev->dev,
				"can't clear tx halt, status %d\n", status);
		else
			clear_bit(TX_HALT, &dev->flags);
	}

	if (test_bit(RX_HALT, &dev->flags)) {
		usb_unlink_anchored_urbs(&dev->rx_active);

		status = usb_autopm_get_interface(dev->intf);
		if (status < 0) {
			dev_err(&dev->udev->dev,
				"can't acquire interface, status %d\n", status);
			return;
		}

		status = usb_clear_halt(dev->udev, dev->bulk_in);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 && status != -EPIPE && status != -ESHUTDOWN)
			dev_err(&dev->udev->dev,
				"can't clear rx halt, status %d\n", status);
		else {
			clear_bit(RX_HALT, &dev->flags);
			if (dev->brdg)
				queue_work(dev->wq, &dev->process_rx_w);
		}
	}
}

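/*
 * Bulk OUT completion: free the urb and skb, then drop TX flow control if
 * the number of pending tx urbs has fallen to fctrl_dis_thld or below.
 */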
static void data_bridge_write_cb(struct urb *urb)
{
	struct sk_buff *skb = urb->context;
	struct data_bridge *dev = *(struct data_bridge **)skb->cb;
	struct bridge *brdg = dev->brdg;
	int pending;

	pr_debug("%s: dev:%p\n", __func__, dev);

	switch (urb->status) {
	case 0: /* success */
		break;
	case -EPROTO:
		dev->err = -EPROTO;
		break;
	case -EPIPE:
		set_bit(TX_HALT, &dev->flags);
		dev_err(&dev->udev->dev, "%s: epout halted\n", __func__);
		schedule_work(&dev->kevent);
		/* FALLTHROUGH */
	case -ESHUTDOWN:
	case -ENOENT: /* suspended */
	case -ECONNRESET: /* unplug */
	case -EOVERFLOW: /* babble error */
		/* FALLTHROUGH */
	default:
		pr_debug_ratelimited("%s: non zero urb status = %d\n",
			__func__, urb->status);
	}

	usb_free_urb(urb);
	dev_kfree_skb_any(skb);

	pending = atomic_dec_return(&dev->pending_txurbs);

	/* flow ctrl */
	if (brdg && fctrl_support && pending <= fctrl_dis_thld &&
		test_and_clear_bit(TX_THROTTLED, &brdg->flags)) {
		pr_debug_ratelimited("%s: disable flow ctrl: pend urbs:%u\n",
			__func__, pending);
		dev->tx_unthrottled_cnt++;
		if (brdg->ops.unthrottle_tx)
			brdg->ops.unthrottle_tx(brdg->ctx);
	}

	usb_autopm_put_interface_async(dev->intf);
}

int data_bridge_write(unsigned int id, struct sk_buff *skb)
{
	int result;
	int size = skb->len;
	int pending;
	struct urb *txurb;
	struct data_bridge *dev;
	struct bridge *brdg;

	/* guard against a bad channel id, as the other entry points do */
	if (id >= MAX_BRIDGE_DEVICES)
		return -EINVAL;

	dev = __dev[id];
	if (!dev || !dev->brdg || dev->err || !usb_get_intfdata(dev->intf))
		return -ENODEV;

	brdg = dev->brdg;
	if (!brdg)
		return -ENODEV;

	dev_dbg(&dev->udev->dev, "%s: write (%d bytes)\n", __func__, skb->len);

	result = usb_autopm_get_interface(dev->intf);
	if (result < 0) {
		dev_err(&dev->udev->dev, "%s: resume failure\n", __func__);
		goto error;
	}

	txurb = usb_alloc_urb(0, GFP_KERNEL);
	if (!txurb) {
		dev_err(&dev->udev->dev, "%s: error allocating tx urb\n",
			__func__);
		result = -ENOMEM;
		goto error;
	}

	/* store dev pointer in skb */
	*((struct data_bridge **)skb->cb) = dev;

	usb_fill_bulk_urb(txurb, dev->udev, dev->bulk_out,
		skb->data, skb->len, data_bridge_write_cb, skb);

	if (test_bit(SUSPENDED, &dev->flags)) {
		usb_anchor_urb(txurb, &dev->delayed);
		goto free_urb;
	}

	pending = atomic_inc_return(&dev->pending_txurbs);
	usb_anchor_urb(txurb, &dev->tx_active);

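	/* interrupt mitigation: request a completion interrupt only on every
	 * tx_urb_mult-th urb, see the tx_urb_mult module parameter above
	 */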
	if (atomic_read(&dev->pending_txurbs) % tx_urb_mult)
		txurb->transfer_flags |= URB_NO_INTERRUPT;

	result = usb_submit_urb(txurb, GFP_KERNEL);
	if (result < 0) {
		usb_unanchor_urb(txurb);
		atomic_dec(&dev->pending_txurbs);
		dev_err(&dev->udev->dev, "%s: submit URB error %d\n",
			__func__, result);
		goto free_urb;
	}

	dev->to_modem++;
	dev_dbg(&dev->udev->dev, "%s: pending_txurbs: %u\n", __func__, pending);

	/*
	 * flow control: this urb was submitted successfully, but return
	 * -EBUSY so the client stops sending until unthrottle_tx is called
	 */
	if (fctrl_support && pending > fctrl_en_thld) {
		set_bit(TX_THROTTLED, &brdg->flags);
		dev->tx_throttled_cnt++;
		pr_debug_ratelimited("%s: enable flow ctrl pend txurbs:%u\n",
			__func__, pending);
		return -EBUSY;
	}

	return size;

free_urb:
	usb_free_urb(txurb);
error:
	dev->txurb_drp_cnt++;
	usb_autopm_put_interface(dev->intf);

	return result;
}
EXPORT_SYMBOL(data_bridge_write);

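/*
 * On resume, resubmit any tx urbs that were parked on the delayed anchor
 * while the bus was suspended, then restart rx processing.
 */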
static int data_bridge_resume(struct data_bridge *dev)
{
	struct urb *urb;
	int retval;

	while ((urb = usb_get_from_anchor(&dev->delayed))) {
		usb_anchor_urb(urb, &dev->tx_active);
		atomic_inc(&dev->pending_txurbs);
		retval = usb_submit_urb(urb, GFP_ATOMIC);
		if (retval < 0) {
			atomic_dec(&dev->pending_txurbs);
			usb_unanchor_urb(urb);

			/* TODO: need to free urb data */
			usb_scuttle_anchored_urbs(&dev->delayed);
			break;
		}
		dev->to_modem++;
		dev->txurb_drp_cnt--;
	}

	clear_bit(SUSPENDED, &dev->flags);

	if (dev->brdg)
		queue_work(dev->wq, &dev->process_rx_w);

	return 0;
}

static int bridge_resume(struct usb_interface *iface)
{
	int retval = 0;
	int oldstate;
	struct data_bridge *dev = usb_get_intfdata(iface);
	struct bridge *brdg = dev->brdg;

	oldstate = iface->dev.power.power_state.event;
	iface->dev.power.power_state.event = PM_EVENT_ON;

	retval = data_bridge_resume(dev);
	if (!retval) {
		if (oldstate & PM_EVENT_SUSPEND && brdg)
			retval = ctrl_bridge_resume(brdg->ch_id);
	}
	return retval;
}

static int data_bridge_suspend(struct data_bridge *dev, pm_message_t message)
{
	if (atomic_read(&dev->pending_txurbs) &&
		(message.event & PM_EVENT_AUTO))
		return -EBUSY;

	set_bit(SUSPENDED, &dev->flags);

	usb_kill_anchored_urbs(&dev->tx_active);
	usb_kill_anchored_urbs(&dev->rx_active);

	return 0;
}

static int bridge_suspend(struct usb_interface *intf, pm_message_t message)
{
	int retval;
	struct data_bridge *dev = usb_get_intfdata(intf);
	struct bridge *brdg = dev->brdg;

	retval = data_bridge_suspend(dev, message);
	if (!retval) {
		if (message.event & PM_EVENT_SUSPEND) {
			if (brdg)
				retval = ctrl_bridge_suspend(brdg->ch_id);
			intf->dev.power.power_state.event = message.event;
		}
	} else {
		dev_dbg(&dev->udev->dev, "%s: device is busy, cannot suspend\n",
			__func__);
	}
	return retval;
}

static int data_bridge_probe(struct usb_interface *iface,
		struct usb_host_endpoint *bulk_in,
		struct usb_host_endpoint *bulk_out, int id)
{
	struct data_bridge *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		err("%s: unable to allocate dev\n", __func__);
		return -ENOMEM;
	}

	dev->pdev = platform_device_alloc(data_bridge_names[id], id);
	if (!dev->pdev) {
		err("%s: unable to allocate platform device\n", __func__);
		kfree(dev);
		return -ENOMEM;
	}

	init_usb_anchor(&dev->tx_active);
	init_usb_anchor(&dev->rx_active);
	init_usb_anchor(&dev->delayed);

	INIT_LIST_HEAD(&dev->rx_idle);
	skb_queue_head_init(&dev->rx_done);

	dev->wq = bridge_wq;

	dev->udev = interface_to_usbdev(iface);
	dev->intf = iface;

	dev->bulk_in = usb_rcvbulkpipe(dev->udev,
		bulk_in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);

	dev->bulk_out = usb_sndbulkpipe(dev->udev,
		bulk_out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);

	usb_set_intfdata(iface, dev);

	INIT_WORK(&dev->kevent, defer_kevent);
	INIT_WORK(&dev->process_rx_w, data_bridge_process_rx);

	__dev[id] = dev;

	/* allocate list of rx urbs */
	data_bridge_prepare_rx(dev);

	platform_device_add(dev->pdev);

	return 0;
}

#if defined(CONFIG_DEBUG_FS)
#define DEBUG_BUF_SIZE	1024
static ssize_t data_bridge_read_stats(struct file *file, char __user *ubuf,
		size_t count, loff_t *ppos)
{
	struct data_bridge *dev;
	char *buf;
	int ret;
	int i;
	int temp = 0;

	buf = kzalloc(DEBUG_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < ch_id; i++) {
		dev = __dev[i];
		if (!dev)
			continue;

		temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
			"\nName#%s dev %p\n"
			"pending tx urbs: %u\n"
			"tx urb drp cnt: %u\n"
			"to host: %lu\n"
			"to mdm: %lu\n"
			"tx throttled cnt: %u\n"
			"tx unthrottled cnt: %u\n"
			"rx throttled cnt: %u\n"
			"rx unthrottled cnt: %u\n"
			"rx done skb qlen: %u\n"
			"dev err: %d\n"
			"suspended: %d\n"
			"TX_HALT: %d\n"
			"RX_HALT: %d\n",
			dev->pdev->name, dev,
			atomic_read(&dev->pending_txurbs),
			dev->txurb_drp_cnt,
			dev->to_host,
			dev->to_modem,
			dev->tx_throttled_cnt,
			dev->tx_unthrottled_cnt,
			dev->rx_throttled_cnt,
			dev->rx_unthrottled_cnt,
			dev->rx_done.qlen,
			dev->err,
			test_bit(SUSPENDED, &dev->flags),
			test_bit(TX_HALT, &dev->flags),
			test_bit(RX_HALT, &dev->flags));
	}

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);

	kfree(buf);

	return ret;
}

static ssize_t data_bridge_reset_stats(struct file *file,
	const char __user *buf, size_t count, loff_t *ppos)
{
	struct data_bridge *dev;
	int i;

	for (i = 0; i < ch_id; i++) {
		dev = __dev[i];
		if (!dev)
			continue;

		dev->to_host = 0;
		dev->to_modem = 0;
		dev->txurb_drp_cnt = 0;
		dev->tx_throttled_cnt = 0;
		dev->tx_unthrottled_cnt = 0;
		dev->rx_throttled_cnt = 0;
		dev->rx_unthrottled_cnt = 0;
	}
	return count;
}

static const struct file_operations data_stats_ops = {
	.read = data_bridge_read_stats,
	.write = data_bridge_reset_stats,
};

static struct dentry *data_dent;
static struct dentry *data_dfile;
static void data_bridge_debugfs_init(void)
{
	data_dent = debugfs_create_dir("data_hsic_bridge", NULL);
	if (IS_ERR(data_dent))
		return;

	data_dfile = debugfs_create_file("status", 0644, data_dent, NULL,
		&data_stats_ops);
	if (!data_dfile || IS_ERR(data_dfile))
		debugfs_remove(data_dent);
}

static void data_bridge_debugfs_exit(void)
{
	debugfs_remove(data_dfile);
	debugfs_remove(data_dent);
}

#else
static void data_bridge_debugfs_init(void) { }
static void data_bridge_debugfs_exit(void) { }
#endif

static int __devinit
bridge_probe(struct usb_interface *iface, const struct usb_device_id *id)
{
	struct usb_host_endpoint *endpoint = NULL;
	struct usb_host_endpoint *bulk_in = NULL;
	struct usb_host_endpoint *bulk_out = NULL;
	struct usb_host_endpoint *int_in = NULL;
	struct usb_device *udev;
	int i;
	int status = 0;
	int numends;
	unsigned int iface_num;

	iface_num = iface->cur_altsetting->desc.bInterfaceNumber;

	if (iface->num_altsetting != 1) {
		err("%s invalid num_altsetting %u\n",
			__func__, iface->num_altsetting);
		return -EINVAL;
	}

	udev = interface_to_usbdev(iface);
	usb_get_dev(udev);

	/* drop the device reference taken above if we don't bind this
	 * interface
	 */
	if (!test_bit(iface_num, &id->driver_info)) {
		status = -ENODEV;
		goto out;
	}

	numends = iface->cur_altsetting->desc.bNumEndpoints;
	for (i = 0; i < numends; i++) {
		endpoint = iface->cur_altsetting->endpoint + i;
		if (!endpoint) {
			dev_err(&udev->dev, "%s: invalid endpoint %u\n",
				__func__, i);
			status = -EINVAL;
			goto out;
		}

		if (usb_endpoint_is_bulk_in(&endpoint->desc))
			bulk_in = endpoint;
		else if (usb_endpoint_is_bulk_out(&endpoint->desc))
			bulk_out = endpoint;
		else if (usb_endpoint_is_int_in(&endpoint->desc))
			int_in = endpoint;
	}

	if (!bulk_in || !bulk_out || !int_in) {
		dev_err(&udev->dev, "%s: invalid endpoints\n", __func__);
		status = -EINVAL;
		goto out;
	}

	status = data_bridge_probe(iface, bulk_in, bulk_out, ch_id);
	if (status < 0) {
		dev_err(&udev->dev, "data_bridge_probe failed %d\n", status);
		goto out;
	}

	status = ctrl_bridge_probe(iface, int_in, ch_id);
	if (status < 0) {
		dev_err(&udev->dev, "ctrl_bridge_probe failed %d\n", status);
		goto free_data_bridge;
	}

	ch_id++;

	return 0;

free_data_bridge:
	platform_device_del(__dev[ch_id]->pdev);
	usb_set_intfdata(iface, NULL);
	kfree(__dev[ch_id]);
	__dev[ch_id] = NULL;
out:
	usb_put_dev(udev);

	return status;
}

static void bridge_disconnect(struct usb_interface *intf)
{
	struct data_bridge *dev = usb_get_intfdata(intf);
	struct list_head *head;
	struct urb *rx_urb;
	unsigned long flags;

	if (!dev) {
		err("%s: data device not found\n", __func__);
		return;
	}

	ch_id--;
	ctrl_bridge_disconnect(ch_id);
	platform_device_del(dev->pdev);
	usb_set_intfdata(intf, NULL);
	__dev[ch_id] = NULL;

	cancel_work_sync(&dev->process_rx_w);
	cancel_work_sync(&dev->kevent);

	/* free rx urbs */
	head = &dev->rx_idle;
	spin_lock_irqsave(&dev->rx_done.lock, flags);
	while (!list_empty(head)) {
		rx_urb = list_entry(head->next, struct urb, urb_list);
		list_del(&rx_urb->urb_list);
		usb_free_urb(rx_urb);
	}
	spin_unlock_irqrestore(&dev->rx_done.lock, flags);

	usb_put_dev(dev->udev);
	kfree(dev);
}

/* bit position represents interface number */
#define PID9001_IFACE_MASK	0xC
#define PID9034_IFACE_MASK	0xC
#define PID9048_IFACE_MASK	0x18

static const struct usb_device_id bridge_ids[] = {
	{ USB_DEVICE(0x5c6, 0x9001),
	.driver_info = PID9001_IFACE_MASK,
	},
	{ USB_DEVICE(0x5c6, 0x9034),
	.driver_info = PID9034_IFACE_MASK,
	},
	{ USB_DEVICE(0x5c6, 0x9048),
	.driver_info = PID9048_IFACE_MASK,
	},

	{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, bridge_ids);

static struct usb_driver bridge_driver = {
	.name = "mdm_bridge",
	.probe = bridge_probe,
	.disconnect = bridge_disconnect,
	.id_table = bridge_ids,
	.suspend = bridge_suspend,
	.resume = bridge_resume,
	.supports_autosuspend = 1,
};

static int __init bridge_init(void)
{
	int ret;

	/* create the workqueue before registering the driver so that a
	 * probe cannot run against a NULL bridge_wq
	 */
	bridge_wq = create_singlethread_workqueue("mdm_bridge");
	if (!bridge_wq) {
		pr_err("%s: Unable to create workqueue:bridge\n", __func__);
		return -ENOMEM;
	}

	ret = usb_register(&bridge_driver);
	if (ret) {
		err("%s: unable to register mdm_bridge driver", __func__);
		destroy_workqueue(bridge_wq);
		return ret;
	}

	data_bridge_debugfs_init();

	return 0;
}

static void __exit bridge_exit(void)
{
	data_bridge_debugfs_exit();
	destroy_workqueue(bridge_wq);
	usb_deregister(&bridge_driver);
}

module_init(bridge_init);
module_exit(bridge_exit);

MODULE_DESCRIPTION("Qualcomm modem data bridge driver");
MODULE_LICENSE("GPL v2");