blob: 687c8c5938d348a64ac9a81b90092fb90a88b7e7 [file] [log] [blame]
Hemant Kumar67a4fd02012-01-05 15:44:36 -08001/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
Hemant Kumar14401d52011-11-03 16:40:32 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/kernel.h>
14#include <linux/errno.h>
15#include <linux/init.h>
16#include <linux/slab.h>
17#include <linux/module.h>
18#include <linux/debugfs.h>
19#include <linux/platform_device.h>
20#include <linux/uaccess.h>
21#include <linux/ratelimit.h>
22#include <mach/usb_bridge.h>
23
/* size of the pre-allocated rx urb pool and of each rx buffer */
#define MAX_RX_URBS			50
#define RMNET_RX_BUFSIZE		2048

/* stop submitting rx urbs once this many skbs sit unconsumed in rx_done */
#define STOP_SUBMIT_URB_LIMIT		500
/* tx flow control: throttle above EN_THRESHOLD pending urbs, unthrottle
 * again when the count drains below DISABLE
 */
#define FLOW_CTRL_EN_THRESHOLD		500
#define FLOW_CTRL_DISABLE		300
#define FLOW_CTRL_SUPPORT		1

/* platform device name per bridge channel, indexed by channel id */
static const char *data_bridge_names[] = {
	"dun_data_hsic0",
	"rmnet_data_hsic0"
};

/* single-threaded workqueue shared by all bridge instances */
static struct workqueue_struct	*bridge_wq;

/* all thresholds below are runtime-tunable via module parameters */
static unsigned int	fctrl_support = FLOW_CTRL_SUPPORT;
module_param(fctrl_support, uint, S_IRUGO | S_IWUSR);

static unsigned int	fctrl_en_thld = FLOW_CTRL_EN_THRESHOLD;
module_param(fctrl_en_thld, uint, S_IRUGO | S_IWUSR);

static unsigned int	fctrl_dis_thld = FLOW_CTRL_DISABLE;
module_param(fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);

unsigned int	max_rx_urbs = MAX_RX_URBS;
module_param(max_rx_urbs, uint, S_IRUGO | S_IWUSR);

unsigned int	stop_submit_urb_limit = STOP_SUBMIT_URB_LIMIT;
module_param(stop_submit_urb_limit, uint, S_IRUGO | S_IWUSR);

/* only every tx_urb_mult-th tx urb requests a completion interrupt */
static unsigned tx_urb_mult = 20;
module_param(tx_urb_mult, uint, S_IRUGO|S_IWUSR);

/* bit positions within data_bridge.flags */
#define TX_HALT   BIT(0)
#define RX_HALT   BIT(1)
#define SUSPENDED BIT(2)
60
/* per-channel state of one HSIC data bridge */
struct data_bridge {
	struct usb_interface		*intf;
	struct usb_device		*udev;
	unsigned int			bulk_in;	/* rcv bulk pipe */
	unsigned int			bulk_out;	/* snd bulk pipe */
	int				err;		/* sticky tx error (-EPROTO) */

	/* keep track of in-flight URBs */
	struct usb_anchor		tx_active;
	struct usb_anchor		rx_active;

	/* keep track of outgoing URBs during suspend */
	struct usb_anchor		delayed;

	struct list_head		rx_idle;	/* free rx urb pool */
	struct sk_buff_head		rx_done;	/* completed rx skbs */

	struct workqueue_struct		*wq;
	struct work_struct		process_rx_w;	/* rx delivery pump */

	struct bridge			*brdg;		/* client; NULL when closed */

	/* work queue function for handling halt conditions */
	struct work_struct		kevent;

	unsigned long			flags;		/* TX_HALT/RX_HALT/SUSPENDED */

	struct platform_device		*pdev;

	/* counters */
	atomic_t			pending_txurbs;
	unsigned int			txurb_drp_cnt;
	unsigned long			to_host;
	unsigned long			to_modem;
	unsigned int			tx_throttled_cnt;
	unsigned int			tx_unthrottled_cnt;
	unsigned int			rx_throttled_cnt;
	unsigned int			rx_unthrottled_cnt;
};
100
/* channel table; slots are filled by data_bridge_probe() */
static struct data_bridge	*__dev[MAX_BRIDGE_DEVICES];

/* counter used for indexing data bridge devices */
static int	ch_id;

/* defined under CONFIG_DEBUG_FS (stubs otherwise) */
static unsigned int get_timestamp(void);
static void dbg_timestamp(char *, struct sk_buff *);
static int submit_rx_urb(struct data_bridge *dev, struct urb *urb,
		gfp_t flags);
110
111static inline bool rx_halted(struct data_bridge *dev)
112{
113 return test_bit(RX_HALT, &dev->flags);
114}
115
116static inline bool rx_throttled(struct bridge *brdg)
117{
118 return test_bit(RX_THROTTLED, &brdg->flags);
119}
120
121int data_bridge_unthrottle_rx(unsigned int id)
122{
123 struct data_bridge *dev;
124
125 if (id >= MAX_BRIDGE_DEVICES)
126 return -EINVAL;
127
128 dev = __dev[id];
Jack Phama7c92672011-11-29 16:38:21 -0800129 if (!dev || !dev->brdg)
Hemant Kumar14401d52011-11-03 16:40:32 -0700130 return -ENODEV;
131
132 dev->rx_unthrottled_cnt++;
133 queue_work(dev->wq, &dev->process_rx_w);
134
135 return 0;
136}
137EXPORT_SYMBOL(data_bridge_unthrottle_rx);
138
/*
 * data_bridge_process_rx() - rx pump; delivers completed skbs and refills urbs
 *
 * Runs on dev->wq.  Phase 1 drains rx_done to the client until it
 * throttles (-EBUSY) or disconnects.  Phase 2 resubmits idle rx urbs
 * unless the backlog exceeds stop_submit_urb_limit.
 */
static void data_bridge_process_rx(struct work_struct *work)
{
	int			retval;
	unsigned long		flags;
	struct urb		*rx_idle;
	struct sk_buff		*skb;
	struct timestamp_info	*info;
	struct data_bridge	*dev =
		container_of(work, struct data_bridge, process_rx_w);

	struct bridge		*brdg = dev->brdg;

	if (!brdg || !brdg->ops.send_pkt || rx_halted(dev))
		return;

	while (!rx_throttled(brdg) && (skb = skb_dequeue(&dev->rx_done))) {
		dev->to_host++;
		info = (struct timestamp_info *)skb->cb;
		info->rx_done_sent = get_timestamp();
		/* hand off sk_buff to client,they'll need to free it */
		retval = brdg->ops.send_pkt(brdg->ctx, skb, skb->len);
		if (retval == -ENOTCONN || retval == -EINVAL) {
			/* client gone: stop delivering, skb already handed off */
			return;
		} else if (retval == -EBUSY) {
			dev->rx_throttled_cnt++;
			break;
		}
	}

	/* rx_done.lock also guards the rx_idle list */
	spin_lock_irqsave(&dev->rx_done.lock, flags);
	while (!list_empty(&dev->rx_idle)) {
		if (dev->rx_done.qlen > stop_submit_urb_limit)
			break;

		rx_idle = list_first_entry(&dev->rx_idle, struct urb, urb_list);
		list_del(&rx_idle->urb_list);
		/* drop the lock across submission (it may sleep/allocate) */
		spin_unlock_irqrestore(&dev->rx_done.lock, flags);
		retval = submit_rx_urb(dev, rx_idle, GFP_KERNEL);
		spin_lock_irqsave(&dev->rx_done.lock, flags);
		if (retval) {
			/* put the urb back in the pool and retry later */
			list_add_tail(&rx_idle->urb_list, &dev->rx_idle);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->rx_done.lock, flags);
}
185
/*
 * data_bridge_read_cb() - bulk-IN completion handler (interrupt context)
 *
 * On success the skb is queued on rx_done for the rx pump.  -EPIPE
 * latches RX_HALT and defers endpoint recovery to kevent.  The urb is
 * always returned to the rx_idle pool; the pump is kicked only for
 * statuses that warrant resubmission ("queue").
 */
static void data_bridge_read_cb(struct urb *urb)
{
	struct bridge		*brdg;
	struct sk_buff		*skb = urb->context;
	struct timestamp_info	*info = (struct timestamp_info *)skb->cb;
	struct data_bridge	*dev = info->dev;
	bool			queue = 0;

	brdg = dev->brdg;
	/* actual_length is 0 on most error paths, so this is harmless there */
	skb_put(skb, urb->actual_length);

	switch (urb->status) {
	case 0: /* success */
		queue = 1;
		info->rx_done = get_timestamp();
		spin_lock(&dev->rx_done.lock);
		__skb_queue_tail(&dev->rx_done, skb);
		spin_unlock(&dev->rx_done.lock);
		break;

	/*do not resubmit*/
	case -EPIPE:
		set_bit(RX_HALT, &dev->flags);
		dev_err(&dev->udev->dev, "%s: epout halted\n", __func__);
		schedule_work(&dev->kevent);
		/* FALLTHROUGH */
	case -ESHUTDOWN:
	case -ENOENT: /* suspended */
	case -ECONNRESET: /* unplug */
	case -EPROTO:
		dev_kfree_skb_any(skb);
		break;

	/*resubmit */
	case -EOVERFLOW: /*babble error*/
	default:
		queue = 1;
		dev_kfree_skb_any(skb);
		pr_debug_ratelimited("%s: non zero urb status = %d\n",
			__func__, urb->status);
		break;
	}

	/* return the urb to the idle pool (list shares rx_done's lock) */
	spin_lock(&dev->rx_done.lock);
	list_add_tail(&urb->urb_list, &dev->rx_idle);
	spin_unlock(&dev->rx_done.lock);

	if (queue)
		queue_work(dev->wq, &dev->process_rx_w);
}
236
/*
 * submit_rx_urb() - arm one rx urb with a fresh skb and submit it
 * @dev:    bridge channel
 * @rx_urb: urb taken off the rx_idle pool by the caller
 * @flags:  allocation context (GFP_KERNEL from the pump)
 *
 * Returns 0 on success.  On failure the skb is freed here but the urb
 * is NOT returned to the pool - the caller re-lists it.  -EINVAL is
 * returned when the device is suspended.
 */
static int submit_rx_urb(struct data_bridge *dev, struct urb *rx_urb,
	gfp_t flags)
{
	struct sk_buff		*skb;
	struct timestamp_info	*info;
	int			retval = -EINVAL;
	unsigned int		created;

	/* stamp before the allocation so the skb's lifetime starts here */
	created = get_timestamp();
	skb = alloc_skb(RMNET_RX_BUFSIZE, flags);
	if (!skb)
		return -ENOMEM;

	info = (struct timestamp_info *)skb->cb;
	info->dev = dev;
	info->created = created;

	usb_fill_bulk_urb(rx_urb, dev->udev, dev->bulk_in,
			  skb->data, RMNET_RX_BUFSIZE,
			  data_bridge_read_cb, skb);

	if (test_bit(SUSPENDED, &dev->flags))
		goto suspended;

	usb_anchor_urb(rx_urb, &dev->rx_active);
	info->rx_queued = get_timestamp();
	retval = usb_submit_urb(rx_urb, flags);
	if (retval)
		goto fail;

	return 0;
fail:
	usb_unanchor_urb(rx_urb);
suspended:
	dev_kfree_skb_any(skb);

	return retval;
}
275
/*
 * data_bridge_prepare_rx() - pre-allocate the pool of max_rx_urbs rx urbs
 *
 * Returns 0 on success, -ENOMEM if an allocation fails.  NOTE: on
 * failure the urbs allocated so far remain on rx_idle; they are freed
 * in bridge_disconnect() with the rest of the pool.
 */
static int data_bridge_prepare_rx(struct data_bridge *dev)
{
	int		i;
	struct urb	*rx_urb;

	for (i = 0; i < max_rx_urbs; i++) {
		rx_urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!rx_urb)
			return -ENOMEM;

		list_add_tail(&rx_urb->urb_list, &dev->rx_idle);
	}
	return 0;
}
290
/*
 * data_bridge_open() - attach a client to its data channel
 * @brdg: client context; brdg->ch_id selects the channel
 *
 * Clears the sticky tx error and every statistics counter, publishes
 * @brdg so the completion paths start delivering to it, and kicks the
 * rx pump.  Returns 0, -EINVAL on bad input, -ENODEV if the channel
 * was never probed.
 */
int data_bridge_open(struct bridge *brdg)
{
	struct data_bridge	*dev;

	if (!brdg) {
		err("bridge is null\n");
		return -EINVAL;
	}

	if (brdg->ch_id >= MAX_BRIDGE_DEVICES)
		return -EINVAL;

	dev = __dev[brdg->ch_id];
	if (!dev) {
		err("dev is null\n");
		return -ENODEV;
	}

	dev_dbg(&dev->udev->dev, "%s: dev:%p\n", __func__, dev);

	dev->brdg = brdg;
	dev->err = 0;
	atomic_set(&dev->pending_txurbs, 0);
	dev->to_host = 0;
	dev->to_modem = 0;
	dev->txurb_drp_cnt = 0;
	dev->tx_throttled_cnt = 0;
	dev->tx_unthrottled_cnt = 0;
	dev->rx_throttled_cnt = 0;
	dev->rx_unthrottled_cnt = 0;

	queue_work(dev->wq, &dev->process_rx_w);

	return 0;
}
EXPORT_SYMBOL(data_bridge_open);
327
/*
 * data_bridge_close() - detach the client from channel @id
 *
 * Requests asynchronous cancellation of every in-flight tx/rx/delayed
 * urb, frees all skbs still queued on rx_done, and clears dev->brdg so
 * no further packets reach the client.
 */
void data_bridge_close(unsigned int id)
{
	struct data_bridge	*dev;
	struct sk_buff		*skb;
	unsigned long		flags;

	if (id >= MAX_BRIDGE_DEVICES)
		return;

	dev = __dev[id];
	if (!dev || !dev->brdg)
		return;

	dev_dbg(&dev->udev->dev, "%s:\n", __func__);

	usb_unlink_anchored_urbs(&dev->tx_active);
	usb_unlink_anchored_urbs(&dev->rx_active);
	usb_unlink_anchored_urbs(&dev->delayed);

	spin_lock_irqsave(&dev->rx_done.lock, flags);
	while ((skb = __skb_dequeue(&dev->rx_done)))
		dev_kfree_skb_any(skb);
	spin_unlock_irqrestore(&dev->rx_done.lock, flags);

	dev->brdg = NULL;
}
EXPORT_SYMBOL(data_bridge_close);
355
/*
 * defer_kevent() - workqueue handler recovering halted bulk endpoints
 *
 * Scheduled from the completion callbacks on -EPIPE.  For each halted
 * direction: unlink outstanding urbs, wake the interface, and clear
 * the stall with usb_clear_halt().  If waking the interface fails we
 * return immediately and keep the *_HALT bit set, so no new transfers
 * are attempted.  After an rx recovery the rx pump is restarted.
 */
static void defer_kevent(struct work_struct *work)
{
	int			status;
	struct data_bridge	*dev =
		container_of(work, struct data_bridge, kevent);

	if (!dev)
		return;

	if (test_bit(TX_HALT, &dev->flags)) {
		usb_unlink_anchored_urbs(&dev->tx_active);

		status = usb_autopm_get_interface(dev->intf);
		if (status < 0) {
			dev_err(&dev->udev->dev,
				"can't acquire interface, status %d\n", status);
			return;
		}

		status = usb_clear_halt(dev->udev, dev->bulk_out);
		usb_autopm_put_interface(dev->intf);
		/* -EPIPE/-ESHUTDOWN here mean the device is going away;
		 * clearing the bit lets normal teardown proceed
		 */
		if (status < 0 && status != -EPIPE && status != -ESHUTDOWN)
			dev_err(&dev->udev->dev,
				"can't clear tx halt, status %d\n", status);
		else
			clear_bit(TX_HALT, &dev->flags);
	}

	if (test_bit(RX_HALT, &dev->flags)) {
		usb_unlink_anchored_urbs(&dev->rx_active);

		status = usb_autopm_get_interface(dev->intf);
		if (status < 0) {
			dev_err(&dev->udev->dev,
				"can't acquire interface, status %d\n", status);
			return;
		}

		status = usb_clear_halt(dev->udev, dev->bulk_in);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 && status != -EPIPE && status != -ESHUTDOWN)
			dev_err(&dev->udev->dev,
				"can't clear rx halt, status %d\n", status);
		else {
			clear_bit(RX_HALT, &dev->flags);
			if (dev->brdg)
				queue_work(dev->wq, &dev->process_rx_w);
		}
	}
}
406
/*
 * data_bridge_write_cb() - bulk-OUT completion handler
 *
 * Frees the urb and skb, drops the pending-tx count, and disables tx
 * flow control once the backlog drains below fctrl_dis_thld.  -EPROTO
 * latches dev->err so data_bridge_write() rejects further traffic
 * until the channel is re-opened; -EPIPE defers stall recovery to
 * kevent.  Releases the autopm reference taken at submit time.
 */
static void data_bridge_write_cb(struct urb *urb)
{
	struct sk_buff		*skb = urb->context;
	struct timestamp_info	*info = (struct timestamp_info *)skb->cb;
	struct data_bridge	*dev = info->dev;
	struct bridge		*brdg = dev->brdg;
	int			pending;

	pr_debug("%s: dev:%p\n", __func__, dev);

	switch (urb->status) {
	case 0: /*success*/
		dbg_timestamp("UL", skb);
		break;
	case -EPROTO:
		dev->err = -EPROTO;
		break;
	case -EPIPE:
		set_bit(TX_HALT, &dev->flags);
		dev_err(&dev->udev->dev, "%s: epout halted\n", __func__);
		schedule_work(&dev->kevent);
		/* FALLTHROUGH */
	case -ESHUTDOWN:
	case -ENOENT: /* suspended */
	case -ECONNRESET: /* unplug */
	case -EOVERFLOW: /*babble error*/
		/* FALLTHROUGH */
	default:
		pr_debug_ratelimited("%s: non zero urb status = %d\n",
					__func__, urb->status);
	}

	usb_free_urb(urb);
	dev_kfree_skb_any(skb);

	pending = atomic_dec_return(&dev->pending_txurbs);

	/*flow ctrl*/
	if (brdg && fctrl_support && pending <= fctrl_dis_thld &&
		test_and_clear_bit(TX_THROTTLED, &brdg->flags)) {
		pr_debug_ratelimited("%s: disable flow ctrl: pend urbs:%u\n",
			__func__, pending);
		dev->tx_unthrottled_cnt++;
		if (brdg->ops.unthrottle_tx)
			brdg->ops.unthrottle_tx(brdg->ctx);
	}

	usb_autopm_put_interface_async(dev->intf);
}
456
457int data_bridge_write(unsigned int id, struct sk_buff *skb)
458{
459 int result;
460 int size = skb->len;
461 int pending;
462 struct urb *txurb;
Hemant Kumarc72ab2b2012-01-10 12:33:49 -0800463 struct timestamp_info *info = (struct timestamp_info *)skb->cb;
Hemant Kumar14401d52011-11-03 16:40:32 -0700464 struct data_bridge *dev = __dev[id];
465 struct bridge *brdg;
466
Hemant Kumar06b7e432012-01-19 22:13:50 -0800467 if (!dev || !dev->brdg || dev->err || !usb_get_intfdata(dev->intf))
Hemant Kumar14401d52011-11-03 16:40:32 -0700468 return -ENODEV;
469
470 brdg = dev->brdg;
Hemant Kumarc8a3d312011-12-27 15:41:32 -0800471 if (!brdg)
472 return -ENODEV;
Hemant Kumar14401d52011-11-03 16:40:32 -0700473
474 dev_dbg(&dev->udev->dev, "%s: write (%d bytes)\n", __func__, skb->len);
475
476 result = usb_autopm_get_interface(dev->intf);
477 if (result < 0) {
478 dev_err(&dev->udev->dev, "%s: resume failure\n", __func__);
479 goto error;
480 }
481
482 txurb = usb_alloc_urb(0, GFP_KERNEL);
483 if (!txurb) {
484 dev_err(&dev->udev->dev, "%s: error allocating read urb\n",
485 __func__);
486 result = -ENOMEM;
487 goto error;
488 }
489
490 /* store dev pointer in skb */
Hemant Kumarc72ab2b2012-01-10 12:33:49 -0800491 info->dev = dev;
492 info->tx_queued = get_timestamp();
Hemant Kumar14401d52011-11-03 16:40:32 -0700493
494 usb_fill_bulk_urb(txurb, dev->udev, dev->bulk_out,
495 skb->data, skb->len, data_bridge_write_cb, skb);
496
497 if (test_bit(SUSPENDED, &dev->flags)) {
498 usb_anchor_urb(txurb, &dev->delayed);
499 goto free_urb;
500 }
501
502 pending = atomic_inc_return(&dev->pending_txurbs);
503 usb_anchor_urb(txurb, &dev->tx_active);
504
Hemant Kumar73eff1c2012-01-09 18:49:11 -0800505 if (atomic_read(&dev->pending_txurbs) % tx_urb_mult)
506 txurb->transfer_flags |= URB_NO_INTERRUPT;
507
Hemant Kumar14401d52011-11-03 16:40:32 -0700508 result = usb_submit_urb(txurb, GFP_KERNEL);
509 if (result < 0) {
510 usb_unanchor_urb(txurb);
511 atomic_dec(&dev->pending_txurbs);
512 dev_err(&dev->udev->dev, "%s: submit URB error %d\n",
513 __func__, result);
514 goto free_urb;
515 }
516
517 dev->to_modem++;
518 dev_dbg(&dev->udev->dev, "%s: pending_txurbs: %u\n", __func__, pending);
519
520 /* flow control: last urb submitted but return -EBUSY */
521 if (fctrl_support && pending > fctrl_en_thld) {
522 set_bit(TX_THROTTLED, &brdg->flags);
523 dev->tx_throttled_cnt++;
524 pr_debug_ratelimited("%s: enable flow ctrl pend txurbs:%u\n",
525 __func__, pending);
526 return -EBUSY;
527 }
528
529 return size;
530
531free_urb:
532 usb_free_urb(txurb);
533error:
534 dev->txurb_drp_cnt++;
535 usb_autopm_put_interface(dev->intf);
536
537 return result;
538}
539EXPORT_SYMBOL(data_bridge_write);
540
/*
 * data_bridge_resume() - replay urbs parked on the delayed anchor
 *
 * Submits every tx urb that was queued while SUSPENDED, rebalancing
 * the counters adjusted by data_bridge_write()'s park path
 * (txurb_drp_cnt--, to_modem++).  On a submit failure the remaining
 * delayed urbs are scuttled.  Finally clears SUSPENDED and restarts
 * the rx pump.
 */
static int data_bridge_resume(struct data_bridge *dev)
{
	struct urb	*urb;
	int		retval;

	while ((urb = usb_get_from_anchor(&dev->delayed))) {
		usb_anchor_urb(urb, &dev->tx_active);
		atomic_inc(&dev->pending_txurbs);
		retval = usb_submit_urb(urb, GFP_ATOMIC);
		if (retval < 0) {
			atomic_dec(&dev->pending_txurbs);
			usb_unanchor_urb(urb);

			/* TODO: need to free urb data */
			usb_scuttle_anchored_urbs(&dev->delayed);
			break;
		}
		dev->to_modem++;
		dev->txurb_drp_cnt--;
	}

	clear_bit(SUSPENDED, &dev->flags);

	if (dev->brdg)
		queue_work(dev->wq, &dev->process_rx_w);

	return 0;
}
569
570static int bridge_resume(struct usb_interface *iface)
571{
572 int retval = 0;
573 int oldstate;
574 struct data_bridge *dev = usb_get_intfdata(iface);
575 struct bridge *brdg = dev->brdg;
576
577 oldstate = iface->dev.power.power_state.event;
578 iface->dev.power.power_state.event = PM_EVENT_ON;
579
580 retval = data_bridge_resume(dev);
581 if (!retval) {
582 if (oldstate & PM_EVENT_SUSPEND && brdg)
583 retval = ctrl_bridge_resume(brdg->ch_id);
584 }
585 return retval;
586}
587
588static int data_bridge_suspend(struct data_bridge *dev, pm_message_t message)
589{
590 if (atomic_read(&dev->pending_txurbs) &&
591 (message.event & PM_EVENT_AUTO))
592 return -EBUSY;
593
594 set_bit(SUSPENDED, &dev->flags);
595
596 usb_kill_anchored_urbs(&dev->tx_active);
597 usb_kill_anchored_urbs(&dev->rx_active);
598
599 return 0;
600}
601
/*
 * bridge_suspend() - USB suspend entry point
 *
 * Quiesces the data path; for a full system suspend it also suspends
 * the control channel and records the PM event so bridge_resume() can
 * tell system resume from autoresume.
 */
static int bridge_suspend(struct usb_interface *intf, pm_message_t message)
{
	int			retval;
	struct data_bridge	*dev = usb_get_intfdata(intf);
	struct bridge		*brdg = dev->brdg;

	retval = data_bridge_suspend(dev, message);
	if (!retval) {
		if (message.event & PM_EVENT_SUSPEND) {
			if (brdg)
				retval = ctrl_bridge_suspend(brdg->ch_id);
			intf->dev.power.power_state.event = message.event;
		}
	} else {
		dev_dbg(&dev->udev->dev, "%s: device is busy,cannot suspend\n",
			__func__);
	}
	return retval;
}
621
622static int data_bridge_probe(struct usb_interface *iface,
623 struct usb_host_endpoint *bulk_in,
624 struct usb_host_endpoint *bulk_out, int id)
625{
626 struct data_bridge *dev;
627
628 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
629 if (!dev) {
630 err("%s: unable to allocate dev\n", __func__);
631 return -ENOMEM;
632 }
633
634 dev->pdev = platform_device_alloc(data_bridge_names[id], id);
635 if (!dev->pdev) {
636 err("%s: unable to allocate platform device\n", __func__);
637 kfree(dev);
638 return -ENOMEM;
639 }
640
641 init_usb_anchor(&dev->tx_active);
642 init_usb_anchor(&dev->rx_active);
643 init_usb_anchor(&dev->delayed);
644
645 INIT_LIST_HEAD(&dev->rx_idle);
646 skb_queue_head_init(&dev->rx_done);
647
648 dev->wq = bridge_wq;
649
650 dev->udev = interface_to_usbdev(iface);
651 dev->intf = iface;
652
653 dev->bulk_in = usb_rcvbulkpipe(dev->udev,
654 bulk_in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
655
656 dev->bulk_out = usb_sndbulkpipe(dev->udev,
657 bulk_out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
658
659 usb_set_intfdata(iface, dev);
660
661 INIT_WORK(&dev->kevent, defer_kevent);
662 INIT_WORK(&dev->process_rx_w, data_bridge_process_rx);
663
664 __dev[id] = dev;
665
666 /*allocate list of rx urbs*/
667 data_bridge_prepare_rx(dev);
668
669 platform_device_add(dev->pdev);
670
671 return 0;
672}
673
674#if defined(CONFIG_DEBUG_FS)
675#define DEBUG_BUF_SIZE 1024
Hemant Kumarc72ab2b2012-01-10 12:33:49 -0800676
/* set non-zero (module param) to record per-skb lifecycle timestamps */
static unsigned int	record_timestamp;
module_param(record_timestamp, uint, S_IRUGO | S_IWUSR);

/* ring buffer of formatted timestamp records, guarded by an rwlock */
static struct timestamp_buf dbg_data = {
	.idx = 0,
	.lck = __RW_LOCK_UNLOCKED(lck)
};
684
685/*get_timestamp - returns time of day in us */
686static unsigned int get_timestamp(void)
687{
688 struct timeval tval;
689 unsigned int stamp;
690
691 if (!record_timestamp)
692 return 0;
693
694 do_gettimeofday(&tval);
695 /* 2^32 = 4294967296. Limit to 4096s. */
696 stamp = tval.tv_sec & 0xFFF;
697 stamp = stamp * 1000000 + tval.tv_usec;
698 return stamp;
699}
700
701static void dbg_inc(unsigned *idx)
702{
703 *idx = (*idx + 1) & (DBG_DATA_MAX-1);
704}
705
/**
 * dbg_timestamp - Stores timestamp values of a SKB life cycle
 *	to debug buffer
 * @event: "UL": Uplink Data
 * @skb:   SKB used to store timestamp values to debug buffer
 *
 * No-op unless record_timestamp is set.  Formats one line into the
 * next ring-buffer slot under the write lock.
 */
static void dbg_timestamp(char *event, struct sk_buff * skb)
{
	unsigned long		flags;
	struct timestamp_info	*info = (struct timestamp_info *)skb->cb;

	if (!record_timestamp)
		return;

	write_lock_irqsave(&dbg_data.lck, flags);

	scnprintf(dbg_data.buf[dbg_data.idx], DBG_DATA_MSG,
		  "%p %u[%s] %u %u %u %u %u %u\n",
		  skb, skb->len, event, info->created, info->rx_queued,
		  info->rx_done, info->rx_done_sent, info->tx_queued,
		  get_timestamp());

	dbg_inc(&dbg_data.idx);

	write_unlock_irqrestore(&dbg_data.lck, flags);
}
732
/* show_timestamp: displays the timestamp buffer
 *
 * debugfs read handler: walks the ring oldest-to-newest starting one
 * past the write index, skipping never-written slots, and copies the
 * result to userspace.  Returns 0 when recording is disabled.
 */
static ssize_t show_timestamp(struct file *file, char __user *ubuf,
		size_t count, loff_t *ppos)
{
	unsigned long	flags;
	unsigned	i;
	unsigned	j = 0;
	char		*buf;
	int		ret = 0;

	if (!record_timestamp)
		return 0;

	/* worst case: DBG_DATA_MAX lines of DBG_DATA_MSG bytes each —
	 * presumably 4*DEBUG_BUF_SIZE is sized for that; TODO confirm
	 */
	buf = kzalloc(sizeof(char) * 4 * DEBUG_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	read_lock_irqsave(&dbg_data.lck, flags);

	i = dbg_data.idx;
	for (dbg_inc(&i); i != dbg_data.idx; dbg_inc(&i)) {
		if (!strnlen(dbg_data.buf[i], DBG_DATA_MSG))
			continue;
		j += scnprintf(buf + j, (4 * DEBUG_BUF_SIZE) - j,
			       "%s\n", dbg_data.buf[i]);
	}

	read_unlock_irqrestore(&dbg_data.lck, flags);

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, j);

	kfree(buf);

	return ret;
}
768
/* debugfs "timestamp" file: read-only dump of the timestamp ring */
const struct file_operations data_timestamp_ops = {
	.read = show_timestamp,
};
772
/*
 * data_bridge_read_stats() - debugfs "status" read handler
 *
 * Formats the counters and flag bits of every probed channel into a
 * temporary buffer and copies it to userspace.
 */
static ssize_t data_bridge_read_stats(struct file *file, char __user *ubuf,
		size_t count, loff_t *ppos)
{
	struct data_bridge	*dev;
	char			*buf;
	int			ret;
	int			i;
	int			temp = 0;

	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < ch_id; i++) {
		dev = __dev[i];
		if (!dev)
			continue;

		temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
				"\nName#%s dev %p\n"
				"pending tx urbs:    %u\n"
				"tx urb drp cnt:     %u\n"
				"to host:            %lu\n"
				"to mdm:             %lu\n"
				"tx throttled cnt:   %u\n"
				"tx unthrottled cnt: %u\n"
				"rx throttled cnt:   %u\n"
				"rx unthrottled cnt: %u\n"
				"rx done skb qlen:   %u\n"
				"dev err:            %d\n"
				"suspended:          %d\n"
				"TX_HALT:            %d\n"
				"RX_HALT:            %d\n",
				dev->pdev->name, dev,
				atomic_read(&dev->pending_txurbs),
				dev->txurb_drp_cnt,
				dev->to_host,
				dev->to_modem,
				dev->tx_throttled_cnt,
				dev->tx_unthrottled_cnt,
				dev->rx_throttled_cnt,
				dev->rx_unthrottled_cnt,
				dev->rx_done.qlen,
				dev->err,
				test_bit(SUSPENDED, &dev->flags),
				test_bit(TX_HALT, &dev->flags),
				test_bit(RX_HALT, &dev->flags));

	}

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);

	kfree(buf);

	return ret;
}
829
830static ssize_t data_bridge_reset_stats(struct file *file,
831 const char __user *buf, size_t count, loff_t *ppos)
832{
833 struct data_bridge *dev;
834 int i;
835
836 for (i = 0; i < ch_id; i++) {
837 dev = __dev[i];
838 if (!dev)
839 continue;
840
841 dev->to_host = 0;
842 dev->to_modem = 0;
843 dev->txurb_drp_cnt = 0;
844 dev->tx_throttled_cnt = 0;
845 dev->tx_unthrottled_cnt = 0;
846 dev->rx_throttled_cnt = 0;
847 dev->rx_unthrottled_cnt = 0;
848 }
849 return count;
850}
851
/* debugfs "status" file: read dumps counters, write resets them */
const struct file_operations data_stats_ops = {
	.read = data_bridge_read_stats,
	.write = data_bridge_reset_stats,
};
856
/* debugfs handles: parent dir plus the "status" and "timestamp" files */
static struct dentry	*data_dent;
static struct dentry	*data_dfile_stats;
static struct dentry	*data_dfile_tstamp;
860
Hemant Kumar14401d52011-11-03 16:40:32 -0700861static void data_bridge_debugfs_init(void)
862{
863 data_dent = debugfs_create_dir("data_hsic_bridge", 0);
864 if (IS_ERR(data_dent))
865 return;
866
Hemant Kumarc72ab2b2012-01-10 12:33:49 -0800867 data_dfile_stats = debugfs_create_file("status", 0644, data_dent, 0,
868 &data_stats_ops);
869 if (!data_dfile_stats || IS_ERR(data_dfile_stats)) {
870 debugfs_remove(data_dent);
871 return;
872 }
873
874 data_dfile_tstamp = debugfs_create_file("timestamp", 0644, data_dent,
875 0, &data_timestamp_ops);
876 if (!data_dfile_tstamp || IS_ERR(data_dfile_tstamp))
Hemant Kumar14401d52011-11-03 16:40:32 -0700877 debugfs_remove(data_dent);
878}
879
/* remove the debugfs files before their parent directory */
static void data_bridge_debugfs_exit(void)
{
	debugfs_remove(data_dfile_stats);
	debugfs_remove(data_dfile_tstamp);
	debugfs_remove(data_dent);
}
886
887#else
/* CONFIG_DEBUG_FS disabled: every debug hook collapses to a no-op */
static void data_bridge_debugfs_init(void) { }
static void data_bridge_debugfs_exit(void) { }
static void dbg_timestamp(char *event, struct sk_buff *skb)
{
}

static unsigned int get_timestamp(void)
{
	return 0;
}
899
Hemant Kumar14401d52011-11-03 16:40:32 -0700900#endif
901
902static int __devinit
903bridge_probe(struct usb_interface *iface, const struct usb_device_id *id)
904{
905 struct usb_host_endpoint *endpoint = NULL;
906 struct usb_host_endpoint *bulk_in = NULL;
907 struct usb_host_endpoint *bulk_out = NULL;
908 struct usb_host_endpoint *int_in = NULL;
909 struct usb_device *udev;
910 int i;
911 int status = 0;
912 int numends;
Hemant Kumar67a4fd02012-01-05 15:44:36 -0800913 unsigned int iface_num;
Hemant Kumar14401d52011-11-03 16:40:32 -0700914
915 iface_num = iface->cur_altsetting->desc.bInterfaceNumber;
916
917 if (iface->num_altsetting != 1) {
918 err("%s invalid num_altsetting %u\n",
919 __func__, iface->num_altsetting);
920 return -EINVAL;
921 }
922
923 udev = interface_to_usbdev(iface);
924 usb_get_dev(udev);
925
Hemant Kumar67a4fd02012-01-05 15:44:36 -0800926 if (!test_bit(iface_num, &id->driver_info))
Hemant Kumar46f9f242011-12-15 20:20:58 -0800927 return -ENODEV;
Hemant Kumar14401d52011-11-03 16:40:32 -0700928
929 numends = iface->cur_altsetting->desc.bNumEndpoints;
930 for (i = 0; i < numends; i++) {
931 endpoint = iface->cur_altsetting->endpoint + i;
932 if (!endpoint) {
933 dev_err(&udev->dev, "%s: invalid endpoint %u\n",
934 __func__, i);
935 status = -EINVAL;
936 goto out;
937 }
938
939 if (usb_endpoint_is_bulk_in(&endpoint->desc))
940 bulk_in = endpoint;
941 else if (usb_endpoint_is_bulk_out(&endpoint->desc))
942 bulk_out = endpoint;
943 else if (usb_endpoint_is_int_in(&endpoint->desc))
944 int_in = endpoint;
945 }
946
947 if (!bulk_in || !bulk_out || !int_in) {
948 dev_err(&udev->dev, "%s: invalid endpoints\n", __func__);
949 status = -EINVAL;
950 goto out;
951 }
952
953 status = data_bridge_probe(iface, bulk_in, bulk_out, ch_id);
954 if (status < 0) {
955 dev_err(&udev->dev, "data_bridge_probe failed %d\n", status);
956 goto out;
957 }
958
959 status = ctrl_bridge_probe(iface, int_in, ch_id);
960 if (status < 0) {
961 dev_err(&udev->dev, "ctrl_bridge_probe failed %d\n", status);
962 goto free_data_bridge;
963 }
Hemant Kumar67a4fd02012-01-05 15:44:36 -0800964
Hemant Kumar14401d52011-11-03 16:40:32 -0700965 ch_id++;
966
967 return 0;
968
969free_data_bridge:
970 platform_device_del(__dev[ch_id]->pdev);
971 usb_set_intfdata(iface, NULL);
972 kfree(__dev[ch_id]);
973 __dev[ch_id] = NULL;
974out:
975 usb_put_dev(udev);
976
977 return status;
978}
979
/*
 * bridge_disconnect() - USB disconnect; tears down one channel
 *
 * NOTE(review): the ch_id-- bookkeeping assumes channels disconnect in
 * reverse probe order (LIFO) — verify this holds for multi-channel
 * hot-unplug.  Flushes the rx/kevent work, frees the rx urb pool, and
 * drops the device reference taken in bridge_probe().
 */
static void bridge_disconnect(struct usb_interface *intf)
{
	struct data_bridge	*dev = usb_get_intfdata(intf);
	struct list_head	*head;
	struct urb		*rx_urb;
	unsigned long		flags;

	if (!dev) {
		err("%s: data device not found\n", __func__);
		return;
	}

	ch_id--;
	ctrl_bridge_disconnect(ch_id);
	platform_device_del(dev->pdev);
	usb_set_intfdata(intf, NULL);
	__dev[ch_id] = NULL;

	cancel_work_sync(&dev->process_rx_w);
	cancel_work_sync(&dev->kevent);

	/*free rx urbs*/
	head = &dev->rx_idle;
	spin_lock_irqsave(&dev->rx_done.lock, flags);
	while (!list_empty(head)) {
		rx_urb = list_entry(head->next, struct urb, urb_list);
		list_del(&rx_urb->urb_list);
		usb_free_urb(rx_urb);
	}
	spin_unlock_irqrestore(&dev->rx_done.lock, flags);

	usb_put_dev(dev->udev);
	kfree(dev);
}
1014
/*bit position represents interface number*/
#define PID9001_IFACE_MASK	0xC	/* interfaces 2 and 3 */
#define PID9034_IFACE_MASK	0xC	/* interfaces 2 and 3 */
#define PID9048_IFACE_MASK	0x18	/* interfaces 3 and 4 */

/* Qualcomm modem devices; driver_info carries the interface bitmask
 * consumed by bridge_probe()
 */
static const struct usb_device_id bridge_ids[] = {
	{ USB_DEVICE(0x5c6, 0x9001),
	.driver_info = PID9001_IFACE_MASK,
	},
	{ USB_DEVICE(0x5c6, 0x9034),
	.driver_info = PID9034_IFACE_MASK,
	},
	{ USB_DEVICE(0x5c6, 0x9048),
	.driver_info = PID9048_IFACE_MASK,
	},

	{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, bridge_ids);
1034
/* USB driver glue; autosuspend supported via bridge_suspend/resume */
static struct usb_driver bridge_driver = {
	.name =			"mdm_bridge",
	.probe =		bridge_probe,
	.disconnect =		bridge_disconnect,
	.id_table =		bridge_ids,
	.suspend =		bridge_suspend,
	.resume =		bridge_resume,
	.supports_autosuspend =	1,
};
1044
1045static int __init bridge_init(void)
1046{
1047 int ret;
1048
1049 ret = usb_register(&bridge_driver);
1050 if (ret) {
1051 err("%s: unable to register mdm_bridge driver", __func__);
1052 return ret;
1053 }
1054
1055 bridge_wq = create_singlethread_workqueue("mdm_bridge");
1056 if (!bridge_wq) {
1057 usb_deregister(&bridge_driver);
1058 pr_err("%s: Unable to create workqueue:bridge\n", __func__);
1059 return -ENOMEM;
1060 }
1061
1062 data_bridge_debugfs_init();
1063
1064 return 0;
1065}
1066
/* module exit: tear down in reverse order of bridge_init() */
static void __exit bridge_exit(void)
{
	data_bridge_debugfs_exit();
	destroy_workqueue(bridge_wq);
	usb_deregister(&bridge_driver);
}
1073
1074module_init(bridge_init);
1075module_exit(bridge_exit);
1076
1077MODULE_DESCRIPTION("Qualcomm modem data bridge driver");
1078MODULE_LICENSE("GPL v2");