/* Copyright (c) 2011-2013, Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/ratelimit.h>
#include <mach/usb_bridge.h>

#define MAX_RX_URBS		100
#define RMNET_RX_BUFSIZE	2048

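/*
 * RX URB resubmission stops once rx_done is holding more than
 * stop_submit_urb_limit packets that still await delivery to the client.
 * TX flow control: when the count of pending TX URBs exceeds
 * fctrl_en_thld, data_bridge_write() returns -EBUSY and sets
 * TX_THROTTLED; once completions drain the count to fctrl_dis_thld or
 * below, the client's unthrottle_tx callback is invoked.
 */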
#define STOP_SUBMIT_URB_LIMIT	500
#define FLOW_CTRL_EN_THRESHOLD	500
#define FLOW_CTRL_DISABLE	300
#define FLOW_CTRL_SUPPORT	1

static const char *data_bridge_names[] = {
	"dun_data_hsic0",
	"rmnet_data_hsic0"
};

static struct workqueue_struct *bridge_wq;

static unsigned int fctrl_support = FLOW_CTRL_SUPPORT;
module_param(fctrl_support, uint, S_IRUGO | S_IWUSR);

static unsigned int fctrl_en_thld = FLOW_CTRL_EN_THRESHOLD;
module_param(fctrl_en_thld, uint, S_IRUGO | S_IWUSR);

static unsigned int fctrl_dis_thld = FLOW_CTRL_DISABLE;
module_param(fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);

unsigned int max_rx_urbs = MAX_RX_URBS;
module_param(max_rx_urbs, uint, S_IRUGO | S_IWUSR);

unsigned int stop_submit_urb_limit = STOP_SUBMIT_URB_LIMIT;
module_param(stop_submit_urb_limit, uint, S_IRUGO | S_IWUSR);

static unsigned tx_urb_mult = 20;
module_param(tx_urb_mult, uint, S_IRUGO | S_IWUSR);

#define TX_HALT		BIT(0)
#define RX_HALT		BIT(1)
#define SUSPENDED	BIT(2)

struct data_bridge {
	struct usb_interface *intf;
	struct usb_device *udev;
	int id;

	unsigned int bulk_in;
	unsigned int bulk_out;
	int err;

	/* keep track of in-flight URBs */
	struct usb_anchor tx_active;
	struct usb_anchor rx_active;

	/* keep track of outgoing URBs during suspend */
	struct usb_anchor delayed;

	struct list_head rx_idle;
	struct sk_buff_head rx_done;

	struct workqueue_struct *wq;
	struct work_struct process_rx_w;

	struct bridge *brdg;

	/* work queue function for handling halt conditions */
	struct work_struct kevent;

	unsigned long flags;

	struct platform_device *pdev;

	/* counters */
	atomic_t pending_txurbs;
	unsigned int txurb_drp_cnt;
	unsigned long to_host;
	unsigned long to_modem;
	unsigned int tx_throttled_cnt;
	unsigned int tx_unthrottled_cnt;
	unsigned int rx_throttled_cnt;
	unsigned int rx_unthrottled_cnt;
};

static struct data_bridge *__dev[MAX_BRIDGE_DEVICES];

/* counter used for indexing data bridge devices */
static int ch_id;

static unsigned int get_timestamp(void);
static void dbg_timestamp(char *, struct sk_buff *);
static int submit_rx_urb(struct data_bridge *dev, struct urb *urb,
		gfp_t flags);

static inline bool rx_halted(struct data_bridge *dev)
{
	return test_bit(RX_HALT, &dev->flags);
}

static inline bool rx_throttled(struct bridge *brdg)
{
	return test_bit(RX_THROTTLED, &brdg->flags);
}

int data_bridge_unthrottle_rx(unsigned int id)
{
	struct data_bridge *dev;

	if (id >= MAX_BRIDGE_DEVICES)
		return -EINVAL;

	dev = __dev[id];
	if (!dev || !dev->brdg)
		return -ENODEV;

	dev->rx_unthrottled_cnt++;
	queue_work(dev->wq, &dev->process_rx_w);

	return 0;
}
EXPORT_SYMBOL(data_bridge_unthrottle_rx);

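/*
 * RX work item: drain completed sk_buffs from rx_done to the client
 * via brdg->ops.send_pkt() and resubmit idle RX URBs, unless the
 * client has throttled us or the IN endpoint is halted.
 */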
static void data_bridge_process_rx(struct work_struct *work)
{
	int retval;
	unsigned long flags;
	struct urb *rx_idle;
	struct sk_buff *skb;
	struct timestamp_info *info;
	struct data_bridge *dev =
		container_of(work, struct data_bridge, process_rx_w);

	struct bridge *brdg = dev->brdg;

	if (!brdg || !brdg->ops.send_pkt || rx_halted(dev))
		return;

	while (!rx_throttled(brdg) && (skb = skb_dequeue(&dev->rx_done))) {
		dev->to_host++;
		info = (struct timestamp_info *)skb->cb;
		info->rx_done_sent = get_timestamp();
		/* hand off the sk_buff to the client; they must free it */
		retval = brdg->ops.send_pkt(brdg->ctx, skb, skb->len);
		if (retval == -ENOTCONN || retval == -EINVAL) {
			return;
		} else if (retval == -EBUSY) {
			dev->rx_throttled_cnt++;
			break;
		}
	}

	spin_lock_irqsave(&dev->rx_done.lock, flags);
	while (!list_empty(&dev->rx_idle)) {
		if (dev->rx_done.qlen > stop_submit_urb_limit)
			break;

		rx_idle = list_first_entry(&dev->rx_idle, struct urb, urb_list);
		list_del(&rx_idle->urb_list);
		spin_unlock_irqrestore(&dev->rx_done.lock, flags);
		retval = submit_rx_urb(dev, rx_idle, GFP_KERNEL);
		spin_lock_irqsave(&dev->rx_done.lock, flags);
		if (retval) {
			list_add_tail(&rx_idle->urb_list, &dev->rx_idle);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->rx_done.lock, flags);
}

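/*
 * Bulk IN completion handler: a successful buffer is queued on rx_done
 * for the RX work item; a stalled endpoint (-EPIPE) defers recovery to
 * kevent; in every case the URB itself is returned to rx_idle.
 */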
static void data_bridge_read_cb(struct urb *urb)
{
	struct bridge *brdg;
	struct sk_buff *skb = urb->context;
	struct timestamp_info *info = (struct timestamp_info *)skb->cb;
	struct data_bridge *dev = info->dev;
	bool queue = 0;

	brdg = dev->brdg;
	skb_put(skb, urb->actual_length);

	switch (urb->status) {
	case 0: /* success */
		queue = 1;
		info->rx_done = get_timestamp();
		spin_lock(&dev->rx_done.lock);
		__skb_queue_tail(&dev->rx_done, skb);
		spin_unlock(&dev->rx_done.lock);
		break;

	/* do not resubmit */
	case -EPIPE:
		set_bit(RX_HALT, &dev->flags);
		dev_err(&dev->intf->dev, "%s: epin halted\n", __func__);
		schedule_work(&dev->kevent);
		/* FALLTHROUGH */
	case -ESHUTDOWN:
	case -ENOENT: /* suspended */
	case -ECONNRESET: /* unplug */
	case -EPROTO:
		dev_kfree_skb_any(skb);
		break;

	/* resubmit */
	case -EOVERFLOW: /* babble error */
	default:
		queue = 1;
		dev_kfree_skb_any(skb);
		pr_debug_ratelimited("%s: non zero urb status = %d\n",
			__func__, urb->status);
		break;
	}

	spin_lock(&dev->rx_done.lock);
	list_add_tail(&urb->urb_list, &dev->rx_idle);
	spin_unlock(&dev->rx_done.lock);

	if (queue)
		queue_work(dev->wq, &dev->process_rx_w);
}

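/*
 * Allocate a fresh sk_buff for this URB, record per-packet timestamps
 * in skb->cb and submit on the bulk IN pipe; submission is skipped
 * while the interface is SUSPENDED.
 */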
static int submit_rx_urb(struct data_bridge *dev, struct urb *rx_urb,
	gfp_t flags)
{
	struct sk_buff *skb;
	struct timestamp_info *info;
	int retval = -EINVAL;
	unsigned int created;

	created = get_timestamp();
	skb = alloc_skb(RMNET_RX_BUFSIZE, flags);
	if (!skb)
		return -ENOMEM;

	info = (struct timestamp_info *)skb->cb;
	info->dev = dev;
	info->created = created;

	usb_fill_bulk_urb(rx_urb, dev->udev, dev->bulk_in,
			skb->data, RMNET_RX_BUFSIZE,
			data_bridge_read_cb, skb);

	if (test_bit(SUSPENDED, &dev->flags))
		goto suspended;

	usb_anchor_urb(rx_urb, &dev->rx_active);
	info->rx_queued = get_timestamp();
	retval = usb_submit_urb(rx_urb, flags);
	if (retval)
		goto fail;

	usb_mark_last_busy(dev->udev);
	return 0;
fail:
	usb_unanchor_urb(rx_urb);
suspended:
	dev_kfree_skb_any(skb);

	return retval;
}

static int data_bridge_prepare_rx(struct data_bridge *dev)
{
	int i;
	struct urb *rx_urb;

	for (i = 0; i < max_rx_urbs; i++) {
		rx_urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!rx_urb)
			return -ENOMEM;

		list_add_tail(&rx_urb->urb_list, &dev->rx_idle);
	}
	return 0;
}

int data_bridge_open(struct bridge *brdg)
{
	struct data_bridge *dev;

	if (!brdg) {
		err("bridge is null\n");
		return -EINVAL;
	}

	if (brdg->ch_id >= MAX_BRIDGE_DEVICES)
		return -EINVAL;

	dev = __dev[brdg->ch_id];
	if (!dev) {
		err("dev is null\n");
		return -ENODEV;
	}

	dev_dbg(&dev->intf->dev, "%s: dev:%p\n", __func__, dev);

	dev->brdg = brdg;
	dev->err = 0;
	atomic_set(&dev->pending_txurbs, 0);
	dev->to_host = 0;
	dev->to_modem = 0;
	dev->txurb_drp_cnt = 0;
	dev->tx_throttled_cnt = 0;
	dev->tx_unthrottled_cnt = 0;
	dev->rx_throttled_cnt = 0;
	dev->rx_unthrottled_cnt = 0;

	queue_work(dev->wq, &dev->process_rx_w);

	return 0;
}
EXPORT_SYMBOL(data_bridge_open);

void data_bridge_close(unsigned int id)
{
	struct data_bridge *dev;
	struct sk_buff *skb;
	unsigned long flags;

	if (id >= MAX_BRIDGE_DEVICES)
		return;

	dev = __dev[id];
	if (!dev || !dev->brdg)
		return;

	dev_dbg(&dev->intf->dev, "%s:\n", __func__);

	cancel_work_sync(&dev->kevent);
	cancel_work_sync(&dev->process_rx_w);

	usb_unlink_anchored_urbs(&dev->tx_active);
	usb_unlink_anchored_urbs(&dev->rx_active);
	usb_unlink_anchored_urbs(&dev->delayed);

	spin_lock_irqsave(&dev->rx_done.lock, flags);
	while ((skb = __skb_dequeue(&dev->rx_done)))
		dev_kfree_skb_any(skb);
	spin_unlock_irqrestore(&dev->rx_done.lock, flags);

	dev->brdg = NULL;
}
EXPORT_SYMBOL(data_bridge_close);

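/*
 * Endpoint-halt recovery. usb_clear_halt() and
 * usb_autopm_get_interface() can block, so this runs from a work item:
 * it unlinks in-flight URBs, clears the stalled endpoint and restarts
 * RX if a client is still attached.
 */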
static void defer_kevent(struct work_struct *work)
{
	int status;
	struct data_bridge *dev =
		container_of(work, struct data_bridge, kevent);

	if (!dev)
		return;

	if (test_bit(TX_HALT, &dev->flags)) {
		usb_unlink_anchored_urbs(&dev->tx_active);

		status = usb_autopm_get_interface(dev->intf);
		if (status < 0) {
			dev_dbg(&dev->intf->dev,
				"can't acquire interface, status %d\n", status);
			return;
		}

		status = usb_clear_halt(dev->udev, dev->bulk_out);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 && status != -EPIPE && status != -ESHUTDOWN)
			dev_err(&dev->intf->dev,
				"can't clear tx halt, status %d\n", status);
		else
			clear_bit(TX_HALT, &dev->flags);
	}

	if (test_bit(RX_HALT, &dev->flags)) {
		usb_unlink_anchored_urbs(&dev->rx_active);

		status = usb_autopm_get_interface(dev->intf);
		if (status < 0) {
			dev_dbg(&dev->intf->dev,
				"can't acquire interface, status %d\n", status);
			return;
		}

		status = usb_clear_halt(dev->udev, dev->bulk_in);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 && status != -EPIPE && status != -ESHUTDOWN)
			dev_err(&dev->intf->dev,
				"can't clear rx halt, status %d\n", status);
		else {
			clear_bit(RX_HALT, &dev->flags);
			if (dev->brdg)
				queue_work(dev->wq, &dev->process_rx_w);
		}
	}
}

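/*
 * Bulk OUT completion handler: frees the URB and sk_buff, drops the
 * pending-TX count and, once it falls to fctrl_dis_thld or below,
 * clears TX_THROTTLED and notifies the client via unthrottle_tx().
 */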
static void data_bridge_write_cb(struct urb *urb)
{
	struct sk_buff *skb = urb->context;
	struct timestamp_info *info = (struct timestamp_info *)skb->cb;
	struct data_bridge *dev = info->dev;
	struct bridge *brdg = dev->brdg;
	int pending;

	pr_debug("%s: dev:%p\n", __func__, dev);

	switch (urb->status) {
	case 0: /* success */
		dbg_timestamp("UL", skb);
		break;
	case -EPROTO:
		dev->err = -EPROTO;
		break;
	case -EPIPE:
		set_bit(TX_HALT, &dev->flags);
		dev_err(&dev->intf->dev, "%s: epout halted\n", __func__);
		schedule_work(&dev->kevent);
		/* FALLTHROUGH */
	case -ESHUTDOWN:
	case -ENOENT: /* suspended */
	case -ECONNRESET: /* unplug */
	case -EOVERFLOW: /* babble error */
		/* FALLTHROUGH */
	default:
		pr_debug_ratelimited("%s: non zero urb status = %d\n",
			__func__, urb->status);
	}

	usb_free_urb(urb);
	dev_kfree_skb_any(skb);

	pending = atomic_dec_return(&dev->pending_txurbs);

	/* flow ctrl */
	if (brdg && fctrl_support && pending <= fctrl_dis_thld &&
		test_and_clear_bit(TX_THROTTLED, &brdg->flags)) {
		pr_debug_ratelimited("%s: disable flow ctrl: pend urbs:%u\n",
			__func__, pending);
		dev->tx_unthrottled_cnt++;
		if (brdg->ops.unthrottle_tx)
			brdg->ops.unthrottle_tx(brdg->ctx);
	}

	usb_autopm_put_interface_async(dev->intf);
}

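/*
 * Queue one sk_buff on the bulk OUT pipe. Returns the number of bytes
 * queued, -EBUSY when the pending-TX count crosses fctrl_en_thld (the
 * URB was still submitted; the caller should back off until
 * unthrottle_tx is called), or a negative error code. While the
 * interface is SUSPENDED the URB is parked on the 'delayed' anchor and
 * submitted from data_bridge_resume().
 */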
int data_bridge_write(unsigned int id, struct sk_buff *skb)
{
	int result;
	int size = skb->len;
	int pending;
	struct urb *txurb;
	struct timestamp_info *info = (struct timestamp_info *)skb->cb;
	struct data_bridge *dev = __dev[id];
	struct bridge *brdg;

	if (!dev || !dev->brdg || dev->err || !usb_get_intfdata(dev->intf))
		return -ENODEV;

	brdg = dev->brdg;
	if (!brdg)
		return -ENODEV;

	dev_dbg(&dev->intf->dev, "%s: write (%d bytes)\n", __func__, skb->len);

	result = usb_autopm_get_interface(dev->intf);
	if (result < 0) {
		dev_dbg(&dev->intf->dev, "%s: resume failure\n", __func__);
		goto pm_error;
	}

	txurb = usb_alloc_urb(0, GFP_KERNEL);
	if (!txurb) {
		dev_err(&dev->intf->dev, "%s: error allocating tx urb\n",
			__func__);
		result = -ENOMEM;
		goto error;
	}

	/* store dev pointer in skb */
	info->dev = dev;
	info->tx_queued = get_timestamp();

	usb_fill_bulk_urb(txurb, dev->udev, dev->bulk_out,
		skb->data, skb->len, data_bridge_write_cb, skb);

	txurb->transfer_flags |= URB_ZERO_PACKET;

	if (test_bit(SUSPENDED, &dev->flags)) {
		usb_anchor_urb(txurb, &dev->delayed);
		goto free_urb;
	}

	pending = atomic_inc_return(&dev->pending_txurbs);
	usb_anchor_urb(txurb, &dev->tx_active);

	if (atomic_read(&dev->pending_txurbs) % tx_urb_mult)
		txurb->transfer_flags |= URB_NO_INTERRUPT;

	result = usb_submit_urb(txurb, GFP_KERNEL);
	if (result < 0) {
		usb_unanchor_urb(txurb);
		atomic_dec(&dev->pending_txurbs);
		dev_err(&dev->intf->dev, "%s: submit URB error %d\n",
			__func__, result);
		goto free_urb;
	}

	dev->to_modem++;
	dev_dbg(&dev->intf->dev, "%s: pending_txurbs: %u\n", __func__, pending);

	/* flow control: urb was submitted, but return -EBUSY so client backs off */
	if (fctrl_support && pending > fctrl_en_thld) {
		set_bit(TX_THROTTLED, &brdg->flags);
		dev->tx_throttled_cnt++;
		pr_debug_ratelimited("%s: enable flow ctrl pend txurbs:%u\n",
			__func__, pending);
		return -EBUSY;
	}

	return size;

free_urb:
	usb_free_urb(txurb);
error:
	dev->txurb_drp_cnt++;
	usb_autopm_put_interface(dev->intf);
pm_error:
	return result;
}
EXPORT_SYMBOL(data_bridge_write);

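/*
 * Resubmit the TX URBs parked on the 'delayed' anchor while the device
 * was suspended, then kick the RX work item so reads restart.
 */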
static int data_bridge_resume(struct data_bridge *dev)
{
	struct urb *urb;
	int retval;

	if (!test_and_clear_bit(SUSPENDED, &dev->flags))
		return 0;

	while ((urb = usb_get_from_anchor(&dev->delayed))) {
		usb_anchor_urb(urb, &dev->tx_active);
		atomic_inc(&dev->pending_txurbs);
		retval = usb_submit_urb(urb, GFP_ATOMIC);
		if (retval < 0) {
			atomic_dec(&dev->pending_txurbs);
			usb_unanchor_urb(urb);

			/* TODO: need to free urb data */
			usb_scuttle_anchored_urbs(&dev->delayed);
			break;
		}
		dev->to_modem++;
		dev->txurb_drp_cnt--;
	}

	if (dev->brdg)
		queue_work(dev->wq, &dev->process_rx_w);

	return 0;
}

static int bridge_resume(struct usb_interface *iface)
{
	int retval = 0;
	int oldstate;
	struct data_bridge *dev = usb_get_intfdata(iface);

	oldstate = iface->dev.power.power_state.event;
	iface->dev.power.power_state.event = PM_EVENT_ON;

	if (oldstate & PM_EVENT_SUSPEND) {
		retval = data_bridge_resume(dev);
		if (!retval)
			retval = ctrl_bridge_resume(dev->id);
	}

	return retval;
}

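/*
 * Refuse runtime (auto) suspend while TX URBs are still in flight;
 * otherwise mark the bridge SUSPENDED and kill all outstanding URBs.
 */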
static int data_bridge_suspend(struct data_bridge *dev, pm_message_t message)
{
	if (atomic_read(&dev->pending_txurbs) &&
		(message.event & PM_EVENT_AUTO))
		return -EBUSY;

	set_bit(SUSPENDED, &dev->flags);

	usb_kill_anchored_urbs(&dev->tx_active);
	usb_kill_anchored_urbs(&dev->rx_active);

	return 0;
}

static int bridge_suspend(struct usb_interface *intf, pm_message_t message)
{
	int retval;
	struct data_bridge *dev = usb_get_intfdata(intf);

	retval = data_bridge_suspend(dev, message);
	if (!retval) {
		retval = ctrl_bridge_suspend(dev->id);
		intf->dev.power.power_state.event = message.event;
	}

	return retval;
}

static int data_bridge_probe(struct usb_interface *iface,
	struct usb_host_endpoint *bulk_in,
	struct usb_host_endpoint *bulk_out, int id)
{
	struct data_bridge *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		err("%s: unable to allocate dev\n", __func__);
		return -ENOMEM;
	}

	dev->pdev = platform_device_alloc(data_bridge_names[id], id);
	if (!dev->pdev) {
		err("%s: unable to allocate platform device\n", __func__);
		kfree(dev);
		return -ENOMEM;
	}

	init_usb_anchor(&dev->tx_active);
	init_usb_anchor(&dev->rx_active);
	init_usb_anchor(&dev->delayed);

	INIT_LIST_HEAD(&dev->rx_idle);
	skb_queue_head_init(&dev->rx_done);

	dev->wq = bridge_wq;
	dev->id = id;
	dev->udev = interface_to_usbdev(iface);
	dev->intf = iface;

	dev->bulk_in = usb_rcvbulkpipe(dev->udev,
		bulk_in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);

	dev->bulk_out = usb_sndbulkpipe(dev->udev,
		bulk_out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);

	usb_set_intfdata(iface, dev);

	INIT_WORK(&dev->kevent, defer_kevent);
	INIT_WORK(&dev->process_rx_w, data_bridge_process_rx);

	__dev[id] = dev;

	/* allocate list of rx urbs */
	data_bridge_prepare_rx(dev);

	platform_device_add(dev->pdev);

	return 0;
}

#if defined(CONFIG_DEBUG_FS)
#define DEBUG_BUF_SIZE	1024

static unsigned int record_timestamp;
module_param(record_timestamp, uint, S_IRUGO | S_IWUSR);

static struct timestamp_buf dbg_data = {
	.idx = 0,
	.lck = __RW_LOCK_UNLOCKED(lck)
};

/* get_timestamp - returns time of day in us */
static unsigned int get_timestamp(void)
{
	struct timeval tval;
	unsigned int stamp;

	if (!record_timestamp)
		return 0;

	do_gettimeofday(&tval);
	/* 2^32 = 4294967296. Limit to 4096s. */
	stamp = tval.tv_sec & 0xFFF;
	stamp = stamp * 1000000 + tval.tv_usec;
	return stamp;
}

static void dbg_inc(unsigned *idx)
{
	*idx = (*idx + 1) & (DBG_DATA_MAX - 1);
}

/**
 * dbg_timestamp - Stores timestamp values of a SKB life cycle
 * to debug buffer
 * @event: "UL": Uplink Data
 * @skb: SKB used to store timestamp values to debug buffer
 */
static void dbg_timestamp(char *event, struct sk_buff *skb)
{
	unsigned long flags;
	struct timestamp_info *info = (struct timestamp_info *)skb->cb;

	if (!record_timestamp)
		return;

	write_lock_irqsave(&dbg_data.lck, flags);

	scnprintf(dbg_data.buf[dbg_data.idx], DBG_DATA_MSG,
		"%p %u[%s] %u %u %u %u %u %u\n",
		skb, skb->len, event, info->created, info->rx_queued,
		info->rx_done, info->rx_done_sent, info->tx_queued,
		get_timestamp());

	dbg_inc(&dbg_data.idx);

	write_unlock_irqrestore(&dbg_data.lck, flags);
}

/* show_timestamp: displays the timestamp buffer */
static ssize_t show_timestamp(struct file *file, char __user *ubuf,
		size_t count, loff_t *ppos)
{
	unsigned long flags;
	unsigned i;
	unsigned j = 0;
	char *buf;
	int ret = 0;

	if (!record_timestamp)
		return 0;

	buf = kzalloc(sizeof(char) * 4 * DEBUG_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	read_lock_irqsave(&dbg_data.lck, flags);

	i = dbg_data.idx;
	for (dbg_inc(&i); i != dbg_data.idx; dbg_inc(&i)) {
		if (!strnlen(dbg_data.buf[i], DBG_DATA_MSG))
			continue;
		j += scnprintf(buf + j, (4 * DEBUG_BUF_SIZE) - j,
			"%s\n", dbg_data.buf[i]);
	}

	read_unlock_irqrestore(&dbg_data.lck, flags);

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, j);

	kfree(buf);

	return ret;
}

const struct file_operations data_timestamp_ops = {
	.read = show_timestamp,
};

static ssize_t data_bridge_read_stats(struct file *file, char __user *ubuf,
		size_t count, loff_t *ppos)
{
	struct data_bridge *dev;
	char *buf;
	int ret;
	int i;
	int temp = 0;

	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < ch_id; i++) {
		dev = __dev[i];
		if (!dev)
			continue;

		temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
			"\nName#%s dev %p\n"
			"pending tx urbs: %u\n"
			"tx urb drp cnt: %u\n"
			"to host: %lu\n"
			"to mdm: %lu\n"
			"tx throttled cnt: %u\n"
			"tx unthrottled cnt: %u\n"
			"rx throttled cnt: %u\n"
			"rx unthrottled cnt: %u\n"
			"rx done skb qlen: %u\n"
			"dev err: %d\n"
			"suspended: %d\n"
			"TX_HALT: %d\n"
			"RX_HALT: %d\n",
			dev->pdev->name, dev,
			atomic_read(&dev->pending_txurbs),
			dev->txurb_drp_cnt,
			dev->to_host,
			dev->to_modem,
			dev->tx_throttled_cnt,
			dev->tx_unthrottled_cnt,
			dev->rx_throttled_cnt,
			dev->rx_unthrottled_cnt,
			dev->rx_done.qlen,
			dev->err,
			test_bit(SUSPENDED, &dev->flags),
			test_bit(TX_HALT, &dev->flags),
			test_bit(RX_HALT, &dev->flags));

	}

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);

	kfree(buf);

	return ret;
}

static ssize_t data_bridge_reset_stats(struct file *file,
	const char __user *buf, size_t count, loff_t *ppos)
{
	struct data_bridge *dev;
	int i;

	for (i = 0; i < ch_id; i++) {
		dev = __dev[i];
		if (!dev)
			continue;

		dev->to_host = 0;
		dev->to_modem = 0;
		dev->txurb_drp_cnt = 0;
		dev->tx_throttled_cnt = 0;
		dev->tx_unthrottled_cnt = 0;
		dev->rx_throttled_cnt = 0;
		dev->rx_unthrottled_cnt = 0;
	}
	return count;
}

const struct file_operations data_stats_ops = {
	.read = data_bridge_read_stats,
	.write = data_bridge_reset_stats,
};

static struct dentry *data_dent;
static struct dentry *data_dfile_stats;
static struct dentry *data_dfile_tstamp;

static void data_bridge_debugfs_init(void)
{
	data_dent = debugfs_create_dir("data_hsic_bridge", 0);
	if (IS_ERR(data_dent))
		return;

	data_dfile_stats = debugfs_create_file("status", 0644, data_dent, 0,
			&data_stats_ops);
	if (!data_dfile_stats || IS_ERR(data_dfile_stats)) {
		debugfs_remove(data_dent);
		return;
	}

	data_dfile_tstamp = debugfs_create_file("timestamp", 0644, data_dent,
			0, &data_timestamp_ops);
	if (!data_dfile_tstamp || IS_ERR(data_dfile_tstamp))
		debugfs_remove(data_dent);
}

static void data_bridge_debugfs_exit(void)
{
	debugfs_remove(data_dfile_stats);
	debugfs_remove(data_dfile_tstamp);
	debugfs_remove(data_dent);
}

#else
static void data_bridge_debugfs_init(void) { }
static void data_bridge_debugfs_exit(void) { }
static void dbg_timestamp(char *event, struct sk_buff *skb)
{
	return;
}

static unsigned int get_timestamp(void)
{
	return 0;
}

#endif

static int __devinit
bridge_probe(struct usb_interface *iface, const struct usb_device_id *id)
{
	struct usb_host_endpoint *endpoint = NULL;
	struct usb_host_endpoint *bulk_in = NULL;
	struct usb_host_endpoint *bulk_out = NULL;
	struct usb_host_endpoint *int_in = NULL;
	struct usb_device *udev;
	int i;
	int status = 0;
	int numends;
	unsigned int iface_num;

	iface_num = iface->cur_altsetting->desc.bInterfaceNumber;

	if (iface->num_altsetting != 1) {
		err("%s invalid num_altsetting %u\n",
			__func__, iface->num_altsetting);
		return -EINVAL;
	}

	if (!test_bit(iface_num, &id->driver_info))
		return -ENODEV;

	udev = interface_to_usbdev(iface);
	usb_get_dev(udev);

	numends = iface->cur_altsetting->desc.bNumEndpoints;
	for (i = 0; i < numends; i++) {
		endpoint = iface->cur_altsetting->endpoint + i;
		if (!endpoint) {
			dev_err(&iface->dev, "%s: invalid endpoint %u\n",
				__func__, i);
			status = -EINVAL;
			goto out;
		}

		if (usb_endpoint_is_bulk_in(&endpoint->desc))
			bulk_in = endpoint;
		else if (usb_endpoint_is_bulk_out(&endpoint->desc))
			bulk_out = endpoint;
		else if (usb_endpoint_is_int_in(&endpoint->desc))
			int_in = endpoint;
	}

	if (!bulk_in || !bulk_out || !int_in) {
		dev_err(&iface->dev, "%s: invalid endpoints\n", __func__);
		status = -EINVAL;
		goto out;
	}

	status = data_bridge_probe(iface, bulk_in, bulk_out, ch_id);
	if (status < 0) {
		dev_err(&iface->dev, "data_bridge_probe failed %d\n", status);
		goto out;
	}

	status = ctrl_bridge_probe(iface, int_in, ch_id);
	if (status < 0) {
		dev_err(&iface->dev, "ctrl_bridge_probe failed %d\n", status);
		goto free_data_bridge;
	}

	ch_id++;

	return 0;

free_data_bridge:
	platform_device_unregister(__dev[ch_id]->pdev);
	usb_set_intfdata(iface, NULL);
	kfree(__dev[ch_id]);
	__dev[ch_id] = NULL;
out:
	usb_put_dev(udev);

	return status;
}

static void bridge_disconnect(struct usb_interface *intf)
{
	struct data_bridge *dev = usb_get_intfdata(intf);
	struct list_head *head;
	struct urb *rx_urb;
	unsigned long flags;

	if (!dev) {
		err("%s: data device not found\n", __func__);
		return;
	}

	ch_id--;
	ctrl_bridge_disconnect(dev->id);
	platform_device_unregister(dev->pdev);
	usb_set_intfdata(intf, NULL);
	__dev[dev->id] = NULL;

	/* free rx urbs */
	head = &dev->rx_idle;
	spin_lock_irqsave(&dev->rx_done.lock, flags);
	while (!list_empty(head)) {
		rx_urb = list_entry(head->next, struct urb, urb_list);
		list_del(&rx_urb->urb_list);
		usb_free_urb(rx_urb);
	}
	spin_unlock_irqrestore(&dev->rx_done.lock, flags);

	usb_put_dev(dev->udev);
	kfree(dev);
}

/* bit position represents interface number */
#define PID9001_IFACE_MASK	0xC
#define PID9034_IFACE_MASK	0xC
#define PID9048_IFACE_MASK	0x18
#define PID904C_IFACE_MASK	0x28
#define PID9075_IFACE_MASK	0x28
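/*
 * e.g. 0x18 claims interfaces 3 and 4, 0x28 claims interfaces 3 and 5;
 * bridge_probe() binds only to interfaces whose bit is set here.
 */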
Hemant Kumar67a4fd02012-01-05 15:44:36 -08001021
Hemant Kumar14401d52011-11-03 16:40:32 -07001022static const struct usb_device_id bridge_ids[] = {
Hemant Kumar67a4fd02012-01-05 15:44:36 -08001023 { USB_DEVICE(0x5c6, 0x9001),
1024 .driver_info = PID9001_IFACE_MASK,
1025 },
1026 { USB_DEVICE(0x5c6, 0x9034),
1027 .driver_info = PID9034_IFACE_MASK,
1028 },
1029 { USB_DEVICE(0x5c6, 0x9048),
1030 .driver_info = PID9048_IFACE_MASK,
1031 },
Hemant Kumare97cbb32012-02-24 13:00:57 -08001032 { USB_DEVICE(0x5c6, 0x904c),
1033 .driver_info = PID904C_IFACE_MASK,
1034 },
Hemant Kumarce3c5bf2012-12-06 15:52:02 -08001035 { USB_DEVICE(0x5c6, 0x9075),
1036 .driver_info = PID9075_IFACE_MASK,
1037 },
Jack Phamb1ad7152011-12-07 10:58:11 -08001038
1039 { } /* Terminating entry */
Hemant Kumar14401d52011-11-03 16:40:32 -07001040};
Hemant Kumar14401d52011-11-03 16:40:32 -07001041MODULE_DEVICE_TABLE(usb, bridge_ids);
1042
1043static struct usb_driver bridge_driver = {
1044 .name = "mdm_bridge",
1045 .probe = bridge_probe,
1046 .disconnect = bridge_disconnect,
1047 .id_table = bridge_ids,
1048 .suspend = bridge_suspend,
1049 .resume = bridge_resume,
1050 .supports_autosuspend = 1,
1051};
1052
1053static int __init bridge_init(void)
1054{
1055 int ret;
1056
1057 ret = usb_register(&bridge_driver);
1058 if (ret) {
1059 err("%s: unable to register mdm_bridge driver", __func__);
1060 return ret;
1061 }
1062
1063 bridge_wq = create_singlethread_workqueue("mdm_bridge");
1064 if (!bridge_wq) {
1065 usb_deregister(&bridge_driver);
1066 pr_err("%s: Unable to create workqueue:bridge\n", __func__);
1067 return -ENOMEM;
1068 }
1069
1070 data_bridge_debugfs_init();
1071
1072 return 0;
1073}
1074
1075static void __exit bridge_exit(void)
1076{
1077 data_bridge_debugfs_exit();
1078 destroy_workqueue(bridge_wq);
1079 usb_deregister(&bridge_driver);
1080}
1081
1082module_init(bridge_init);
1083module_exit(bridge_exit);
1084
1085MODULE_DESCRIPTION("Qualcomm modem data bridge driver");
1086MODULE_LICENSE("GPL v2");