/* Copyright (c) 2011-2013, Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/ratelimit.h>
#include <mach/usb_bridge.h>

#define MAX_RX_URBS		100
#define RMNET_RX_BUFSIZE	2048

#define STOP_SUBMIT_URB_LIMIT	500
#define FLOW_CTRL_EN_THRESHOLD	500
#define FLOW_CTRL_DISABLE	300
#define FLOW_CTRL_SUPPORT	1

static const char *data_bridge_names[] = {
	"dun_data_hsic0",
	"rmnet_data_hsic0"
};

static struct workqueue_struct *bridge_wq;

static unsigned int fctrl_support = FLOW_CTRL_SUPPORT;
module_param(fctrl_support, uint, S_IRUGO | S_IWUSR);

static unsigned int fctrl_en_thld = FLOW_CTRL_EN_THRESHOLD;
module_param(fctrl_en_thld, uint, S_IRUGO | S_IWUSR);

static unsigned int fctrl_dis_thld = FLOW_CTRL_DISABLE;
module_param(fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);

unsigned int max_rx_urbs = MAX_RX_URBS;
module_param(max_rx_urbs, uint, S_IRUGO | S_IWUSR);

unsigned int stop_submit_urb_limit = STOP_SUBMIT_URB_LIMIT;
module_param(stop_submit_urb_limit, uint, S_IRUGO | S_IWUSR);

static unsigned tx_urb_mult = 20;
module_param(tx_urb_mult, uint, S_IRUGO | S_IWUSR);

#define TX_HALT		BIT(0)
#define RX_HALT		BIT(1)
#define SUSPENDED	BIT(2)

struct data_bridge {
	struct usb_interface *intf;
	struct usb_device *udev;
	int id;

	unsigned int bulk_in;
	unsigned int bulk_out;
	int err;

	/* keep track of in-flight URBs */
	struct usb_anchor tx_active;
	struct usb_anchor rx_active;

	/* keep track of outgoing URBs during suspend */
	struct usb_anchor delayed;

	struct list_head rx_idle;
	struct sk_buff_head rx_done;

	struct workqueue_struct *wq;
	struct work_struct process_rx_w;

	struct bridge *brdg;

	/* work queue function for handling halt conditions */
	struct work_struct kevent;

	unsigned long flags;

	struct platform_device *pdev;

	/* counters */
	atomic_t pending_txurbs;
	unsigned int txurb_drp_cnt;
	unsigned long to_host;
	unsigned long to_modem;
	unsigned int tx_throttled_cnt;
	unsigned int tx_unthrottled_cnt;
	unsigned int rx_throttled_cnt;
	unsigned int rx_unthrottled_cnt;
};

static struct data_bridge *__dev[MAX_BRIDGE_DEVICES];

/* counter used for indexing data bridge devices */
static int ch_id;

static unsigned int get_timestamp(void);
static void dbg_timestamp(char *, struct sk_buff *);
static int submit_rx_urb(struct data_bridge *dev, struct urb *urb,
		gfp_t flags);

static inline bool rx_halted(struct data_bridge *dev)
{
	return test_bit(RX_HALT, &dev->flags);
}

static inline bool rx_throttled(struct bridge *brdg)
{
	return test_bit(RX_THROTTLED, &brdg->flags);
}

static void free_rx_urbs(struct data_bridge *dev)
{
	struct list_head *head;
	struct urb *rx_urb;
	unsigned long flags;

	head = &dev->rx_idle;
	spin_lock_irqsave(&dev->rx_done.lock, flags);
	while (!list_empty(head)) {
		rx_urb = list_entry(head->next, struct urb, urb_list);
		list_del(&rx_urb->urb_list);
		usb_free_urb(rx_urb);
	}
	spin_unlock_irqrestore(&dev->rx_done.lock, flags);
}

int data_bridge_unthrottle_rx(unsigned int id)
{
	struct data_bridge *dev;

	if (id >= MAX_BRIDGE_DEVICES)
		return -EINVAL;

	dev = __dev[id];
	if (!dev || !dev->brdg)
		return -ENODEV;

	dev->rx_unthrottled_cnt++;
	queue_work(dev->wq, &dev->process_rx_w);

	return 0;
}
EXPORT_SYMBOL(data_bridge_unthrottle_rx);

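/*
 * Work handler: hand completed RX skbs up to the client and resubmit idle
 * RX urbs until the client throttles us or rx_done backs up.
 */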
static void data_bridge_process_rx(struct work_struct *work)
{
	int retval;
	unsigned long flags;
	struct urb *rx_idle;
	struct sk_buff *skb;
	struct timestamp_info *info;
	struct data_bridge *dev =
		container_of(work, struct data_bridge, process_rx_w);

	struct bridge *brdg = dev->brdg;

	if (!brdg || !brdg->ops.send_pkt || rx_halted(dev))
		return;

	while (!rx_throttled(brdg) && (skb = skb_dequeue(&dev->rx_done))) {
		dev->to_host++;
		info = (struct timestamp_info *)skb->cb;
		info->rx_done_sent = get_timestamp();
		/* hand off sk_buff to client, they'll need to free it */
		retval = brdg->ops.send_pkt(brdg->ctx, skb, skb->len);
		if (retval == -ENOTCONN || retval == -EINVAL) {
			return;
		} else if (retval == -EBUSY) {
			dev->rx_throttled_cnt++;
			break;
		}
	}

	spin_lock_irqsave(&dev->rx_done.lock, flags);
	while (!list_empty(&dev->rx_idle)) {
		if (dev->rx_done.qlen > stop_submit_urb_limit)
			break;

		rx_idle = list_first_entry(&dev->rx_idle, struct urb, urb_list);
		list_del(&rx_idle->urb_list);
		spin_unlock_irqrestore(&dev->rx_done.lock, flags);
		retval = submit_rx_urb(dev, rx_idle, GFP_KERNEL);
		spin_lock_irqsave(&dev->rx_done.lock, flags);
		if (retval) {
			list_add_tail(&rx_idle->urb_list, &dev->rx_idle);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->rx_done.lock, flags);
}

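/*
 * Completion handler for bulk-in urbs: queue the skb for the RX work item
 * on success, or recycle the urb to the idle list on error.
 */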
static void data_bridge_read_cb(struct urb *urb)
{
	struct bridge *brdg;
	struct sk_buff *skb = urb->context;
	struct timestamp_info *info = (struct timestamp_info *)skb->cb;
	struct data_bridge *dev = info->dev;
	bool queue = 0;

	/* usb device disconnect */
	if (urb->dev->state == USB_STATE_NOTATTACHED)
		urb->status = -ECONNRESET;

	brdg = dev->brdg;
	skb_put(skb, urb->actual_length);

	switch (urb->status) {
	case 0: /* success */
		queue = 1;
		info->rx_done = get_timestamp();
		spin_lock(&dev->rx_done.lock);
		__skb_queue_tail(&dev->rx_done, skb);
		spin_unlock(&dev->rx_done.lock);
		break;

	/* do not resubmit */
	case -EPIPE:
		set_bit(RX_HALT, &dev->flags);
		dev_err(&dev->intf->dev, "%s: epin halted\n", __func__);
		schedule_work(&dev->kevent);
		/* FALLTHROUGH */
	case -ESHUTDOWN:
	case -ENOENT: /* suspended */
	case -ECONNRESET: /* unplug */
	case -EPROTO:
		dev_kfree_skb_any(skb);
		break;

	/* resubmit */
	case -EOVERFLOW: /* babble error */
	default:
		queue = 1;
		dev_kfree_skb_any(skb);
		pr_debug_ratelimited("%s: non zero urb status = %d\n",
			__func__, urb->status);
		break;
	}

	spin_lock(&dev->rx_done.lock);
	list_add_tail(&urb->urb_list, &dev->rx_idle);
	spin_unlock(&dev->rx_done.lock);

	if (queue)
		queue_work(dev->wq, &dev->process_rx_w);
}

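/* allocate an skb for an idle RX urb and submit it on the bulk-in pipe */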
static int submit_rx_urb(struct data_bridge *dev, struct urb *rx_urb,
	gfp_t flags)
{
	struct sk_buff *skb;
	struct timestamp_info *info;
	int retval = -EINVAL;
	unsigned int created;

	created = get_timestamp();
	skb = alloc_skb(RMNET_RX_BUFSIZE, flags);
	if (!skb)
		return -ENOMEM;

	info = (struct timestamp_info *)skb->cb;
	info->dev = dev;
	info->created = created;

	usb_fill_bulk_urb(rx_urb, dev->udev, dev->bulk_in,
			skb->data, RMNET_RX_BUFSIZE,
			data_bridge_read_cb, skb);

	if (test_bit(SUSPENDED, &dev->flags))
		goto suspended;

	usb_anchor_urb(rx_urb, &dev->rx_active);
	info->rx_queued = get_timestamp();
	retval = usb_submit_urb(rx_urb, flags);
	if (retval)
		goto fail;

	usb_mark_last_busy(dev->udev);
	return 0;
fail:
	usb_unanchor_urb(rx_urb);
suspended:
	dev_kfree_skb_any(skb);

	return retval;
}

static int data_bridge_prepare_rx(struct data_bridge *dev)
{
	int i;
	struct urb *rx_urb;
	int retval = 0;

	for (i = 0; i < max_rx_urbs; i++) {
		rx_urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!rx_urb) {
			retval = -ENOMEM;
			goto free_urbs;
		}

		list_add_tail(&rx_urb->urb_list, &dev->rx_idle);
	}

	return 0;

free_urbs:
	free_rx_urbs(dev);
	return retval;
}

int data_bridge_open(struct bridge *brdg)
{
	struct data_bridge *dev;

	if (!brdg) {
		err("bridge is null\n");
		return -EINVAL;
	}

	if (brdg->ch_id >= MAX_BRIDGE_DEVICES)
		return -EINVAL;

	dev = __dev[brdg->ch_id];
	if (!dev) {
		err("dev is null\n");
		return -ENODEV;
	}

	dev_dbg(&dev->intf->dev, "%s: dev:%p\n", __func__, dev);

	dev->brdg = brdg;
	dev->err = 0;
	atomic_set(&dev->pending_txurbs, 0);
	dev->to_host = 0;
	dev->to_modem = 0;
	dev->txurb_drp_cnt = 0;
	dev->tx_throttled_cnt = 0;
	dev->tx_unthrottled_cnt = 0;
	dev->rx_throttled_cnt = 0;
	dev->rx_unthrottled_cnt = 0;

	queue_work(dev->wq, &dev->process_rx_w);

	return 0;
}
EXPORT_SYMBOL(data_bridge_open);

void data_bridge_close(unsigned int id)
{
	struct data_bridge *dev;
	struct sk_buff *skb;
	unsigned long flags;

	if (id >= MAX_BRIDGE_DEVICES)
		return;

	dev = __dev[id];
	if (!dev || !dev->brdg)
		return;

	dev_dbg(&dev->intf->dev, "%s:\n", __func__);

	cancel_work_sync(&dev->kevent);
	cancel_work_sync(&dev->process_rx_w);

	usb_kill_anchored_urbs(&dev->tx_active);
	usb_kill_anchored_urbs(&dev->rx_active);
	usb_kill_anchored_urbs(&dev->delayed);

	spin_lock_irqsave(&dev->rx_done.lock, flags);
	while ((skb = __skb_dequeue(&dev->rx_done)))
		dev_kfree_skb_any(skb);
	spin_unlock_irqrestore(&dev->rx_done.lock, flags);

	dev->brdg = NULL;
}
EXPORT_SYMBOL(data_bridge_close);

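/*
 * Work handler: recover from endpoint halts reported by the urb
 * completion handlers by clearing the halt on the affected pipe.
 */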
static void defer_kevent(struct work_struct *work)
{
	int status;
	struct data_bridge *dev =
		container_of(work, struct data_bridge, kevent);

	if (!dev)
		return;

	if (test_bit(TX_HALT, &dev->flags)) {
		usb_unlink_anchored_urbs(&dev->tx_active);

		status = usb_autopm_get_interface(dev->intf);
		if (status < 0) {
			dev_dbg(&dev->intf->dev,
				"can't acquire interface, status %d\n", status);
			return;
		}

		status = usb_clear_halt(dev->udev, dev->bulk_out);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 && status != -EPIPE && status != -ESHUTDOWN)
			dev_err(&dev->intf->dev,
				"can't clear tx halt, status %d\n", status);
		else
			clear_bit(TX_HALT, &dev->flags);
	}

	if (test_bit(RX_HALT, &dev->flags)) {
		usb_unlink_anchored_urbs(&dev->rx_active);

		status = usb_autopm_get_interface(dev->intf);
		if (status < 0) {
			dev_dbg(&dev->intf->dev,
				"can't acquire interface, status %d\n", status);
			return;
		}

		status = usb_clear_halt(dev->udev, dev->bulk_in);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 && status != -EPIPE && status != -ESHUTDOWN)
			dev_err(&dev->intf->dev,
				"can't clear rx halt, status %d\n", status);
		else {
			clear_bit(RX_HALT, &dev->flags);
			if (dev->brdg)
				queue_work(dev->wq, &dev->process_rx_w);
		}
	}
}

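/*
 * Completion handler for bulk-out urbs: release the urb and skb and lift
 * TX flow control once enough pending urbs have drained.
 */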
static void data_bridge_write_cb(struct urb *urb)
{
	struct sk_buff *skb = urb->context;
	struct timestamp_info *info = (struct timestamp_info *)skb->cb;
	struct data_bridge *dev = info->dev;
	struct bridge *brdg = dev->brdg;
	int pending;

	pr_debug("%s: dev:%p\n", __func__, dev);

	switch (urb->status) {
	case 0: /* success */
		dbg_timestamp("UL", skb);
		break;
	case -EPROTO:
		dev->err = -EPROTO;
		break;
	case -EPIPE:
		set_bit(TX_HALT, &dev->flags);
		dev_err(&dev->intf->dev, "%s: epout halted\n", __func__);
		schedule_work(&dev->kevent);
		/* FALLTHROUGH */
	case -ESHUTDOWN:
	case -ENOENT: /* suspended */
	case -ECONNRESET: /* unplug */
	case -EOVERFLOW: /* babble error */
		/* FALLTHROUGH */
	default:
		pr_debug_ratelimited("%s: non zero urb status = %d\n",
			__func__, urb->status);
	}

	usb_free_urb(urb);
	dev_kfree_skb_any(skb);

	pending = atomic_dec_return(&dev->pending_txurbs);

	/* flow ctrl */
	if (brdg && fctrl_support && pending <= fctrl_dis_thld &&
		test_and_clear_bit(TX_THROTTLED, &brdg->flags)) {
		pr_debug_ratelimited("%s: disable flow ctrl: pend urbs:%u\n",
			__func__, pending);
		dev->tx_unthrottled_cnt++;
		if (brdg->ops.unthrottle_tx)
			brdg->ops.unthrottle_tx(brdg->ctx);
	}

	/* if we are here after device disconnect
	 * usb_unbind_interface() takes care of
	 * residual pm_autopm_get_interface_* calls
	 */
	if (urb->dev->state != USB_STATE_NOTATTACHED)
		usb_autopm_put_interface_async(dev->intf);
}

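/*
 * Queue one skb on the bulk-out pipe; returns -EBUSY (after submitting the
 * urb) once the pending TX count crosses the flow-control threshold.
 */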
int data_bridge_write(unsigned int id, struct sk_buff *skb)
{
	int result;
	int size = skb->len;
	int pending;
	struct urb *txurb;
	struct timestamp_info *info = (struct timestamp_info *)skb->cb;
	struct data_bridge *dev = __dev[id];
	struct bridge *brdg;

	if (!dev || !dev->brdg || dev->err || !usb_get_intfdata(dev->intf))
		return -ENODEV;

	brdg = dev->brdg;
	if (!brdg)
		return -ENODEV;

	dev_dbg(&dev->intf->dev, "%s: write (%d bytes)\n", __func__, skb->len);

	result = usb_autopm_get_interface(dev->intf);
	if (result < 0) {
		dev_dbg(&dev->intf->dev, "%s: resume failure\n", __func__);
		goto pm_error;
	}

	txurb = usb_alloc_urb(0, GFP_KERNEL);
	if (!txurb) {
		dev_err(&dev->intf->dev, "%s: error allocating write urb\n",
			__func__);
		result = -ENOMEM;
		goto error;
	}

	/* store dev pointer in skb */
	info->dev = dev;
	info->tx_queued = get_timestamp();

	usb_fill_bulk_urb(txurb, dev->udev, dev->bulk_out,
			skb->data, skb->len, data_bridge_write_cb, skb);

	txurb->transfer_flags |= URB_ZERO_PACKET;

	if (test_bit(SUSPENDED, &dev->flags)) {
		usb_anchor_urb(txurb, &dev->delayed);
		goto free_urb;
	}

	pending = atomic_inc_return(&dev->pending_txurbs);
	usb_anchor_urb(txurb, &dev->tx_active);

	if (atomic_read(&dev->pending_txurbs) % tx_urb_mult)
		txurb->transfer_flags |= URB_NO_INTERRUPT;

	result = usb_submit_urb(txurb, GFP_KERNEL);
	if (result < 0) {
		usb_unanchor_urb(txurb);
		atomic_dec(&dev->pending_txurbs);
		dev_err(&dev->intf->dev, "%s: submit URB error %d\n",
			__func__, result);
		goto free_urb;
	}

	dev->to_modem++;
	dev_dbg(&dev->intf->dev, "%s: pending_txurbs: %u\n", __func__, pending);

	/* flow control: last urb submitted but return -EBUSY */
	if (fctrl_support && pending > fctrl_en_thld) {
		set_bit(TX_THROTTLED, &brdg->flags);
		dev->tx_throttled_cnt++;
		pr_debug_ratelimited("%s: enable flow ctrl pend txurbs:%u\n",
			__func__, pending);
		return -EBUSY;
	}

	return size;

free_urb:
	usb_free_urb(txurb);
error:
	dev->txurb_drp_cnt++;
	usb_autopm_put_interface(dev->intf);
pm_error:
	return result;
}
EXPORT_SYMBOL(data_bridge_write);

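/* resubmit TX urbs that were parked on the delayed anchor while suspended */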
static int data_bridge_resume(struct data_bridge *dev)
{
	struct urb *urb;
	int retval;

	if (!test_and_clear_bit(SUSPENDED, &dev->flags))
		return 0;

	while ((urb = usb_get_from_anchor(&dev->delayed))) {
		usb_anchor_urb(urb, &dev->tx_active);
		atomic_inc(&dev->pending_txurbs);
		retval = usb_submit_urb(urb, GFP_ATOMIC);
		if (retval < 0) {
			atomic_dec(&dev->pending_txurbs);
			usb_unanchor_urb(urb);

			/* TODO: need to free urb data */
			usb_scuttle_anchored_urbs(&dev->delayed);
			break;
		}
		dev->to_modem++;
		dev->txurb_drp_cnt--;
	}

	if (dev->brdg)
		queue_work(dev->wq, &dev->process_rx_w);

	return 0;
}

static int bridge_resume(struct usb_interface *iface)
{
	int retval = 0;
	int oldstate;
	struct data_bridge *dev = usb_get_intfdata(iface);

	oldstate = iface->dev.power.power_state.event;
	iface->dev.power.power_state.event = PM_EVENT_ON;

	if (oldstate & PM_EVENT_SUSPEND) {
		retval = data_bridge_resume(dev);
		if (!retval)
			retval = ctrl_bridge_resume(dev->id);
	}

	return retval;
}

static int data_bridge_suspend(struct data_bridge *dev, pm_message_t message)
{
	if (atomic_read(&dev->pending_txurbs) &&
		(message.event & PM_EVENT_AUTO))
		return -EBUSY;

	set_bit(SUSPENDED, &dev->flags);

	usb_kill_anchored_urbs(&dev->tx_active);
	usb_kill_anchored_urbs(&dev->rx_active);

	return 0;
}

static int bridge_suspend(struct usb_interface *intf, pm_message_t message)
{
	int retval;
	struct data_bridge *dev = usb_get_intfdata(intf);

	retval = data_bridge_suspend(dev, message);
	if (!retval) {
		retval = ctrl_bridge_suspend(dev->id);
		intf->dev.power.power_state.event = message.event;
	}

	return retval;
}

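/*
 * Bind the bulk endpoints of this interface to the preallocated bridge
 * channel, allocate its RX urbs and register its platform device.
 */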
static int data_bridge_probe(struct usb_interface *iface,
		struct usb_host_endpoint *bulk_in,
		struct usb_host_endpoint *bulk_out, int id)
{
	struct data_bridge *dev;
	int retval;

	dev = __dev[id];
	if (!dev) {
		err("%s: device not found\n", __func__);
		return -ENODEV;
	}

	dev->pdev = platform_device_alloc(data_bridge_names[id], id);
	if (!dev->pdev) {
		err("%s: unable to allocate platform device\n", __func__);
		return -ENOMEM;
	}

	dev->flags = 0;
	dev->id = id;
	dev->udev = interface_to_usbdev(iface);
	dev->intf = iface;

	dev->bulk_in = usb_rcvbulkpipe(dev->udev,
		bulk_in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);

	dev->bulk_out = usb_sndbulkpipe(dev->udev,
		bulk_out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);

	usb_set_intfdata(iface, dev);

	/* allocate list of rx urbs */
	retval = data_bridge_prepare_rx(dev);
	if (retval) {
		platform_device_put(dev->pdev);
		return retval;
	}

	platform_device_add(dev->pdev);

	return 0;
}

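/* debugfs support: per-channel statistics and optional skb timestamp log */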
#if defined(CONFIG_DEBUG_FS)
#define DEBUG_BUF_SIZE	1024

static unsigned int record_timestamp;
module_param(record_timestamp, uint, S_IRUGO | S_IWUSR);

static struct timestamp_buf dbg_data = {
	.idx = 0,
	.lck = __RW_LOCK_UNLOCKED(lck)
};

/* get_timestamp - returns time of day in us */
static unsigned int get_timestamp(void)
{
	struct timeval tval;
	unsigned int stamp;

	if (!record_timestamp)
		return 0;

	do_gettimeofday(&tval);
	/* 2^32 = 4294967296. Limit to 4096s. */
	stamp = tval.tv_sec & 0xFFF;
	stamp = stamp * 1000000 + tval.tv_usec;
	return stamp;
}

static void dbg_inc(unsigned *idx)
{
	*idx = (*idx + 1) & (DBG_DATA_MAX - 1);
}

/**
 * dbg_timestamp - Stores timestamp values of a SKB life cycle
 *	to debug buffer
 * @event: "UL": Uplink Data
 * @skb: SKB used to store timestamp values to debug buffer
 */
static void dbg_timestamp(char *event, struct sk_buff *skb)
{
	unsigned long flags;
	struct timestamp_info *info = (struct timestamp_info *)skb->cb;

	if (!record_timestamp)
		return;

	write_lock_irqsave(&dbg_data.lck, flags);

	scnprintf(dbg_data.buf[dbg_data.idx], DBG_DATA_MSG,
		"%p %u[%s] %u %u %u %u %u %u\n",
		skb, skb->len, event, info->created, info->rx_queued,
		info->rx_done, info->rx_done_sent, info->tx_queued,
		get_timestamp());

	dbg_inc(&dbg_data.idx);

	write_unlock_irqrestore(&dbg_data.lck, flags);
}

/* show_timestamp: displays the timestamp buffer */
static ssize_t show_timestamp(struct file *file, char __user *ubuf,
		size_t count, loff_t *ppos)
{
	unsigned long flags;
	unsigned i;
	unsigned j = 0;
	char *buf;
	int ret = 0;

	if (!record_timestamp)
		return 0;

	buf = kzalloc(sizeof(char) * 4 * DEBUG_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	read_lock_irqsave(&dbg_data.lck, flags);

	i = dbg_data.idx;
	for (dbg_inc(&i); i != dbg_data.idx; dbg_inc(&i)) {
		if (!strnlen(dbg_data.buf[i], DBG_DATA_MSG))
			continue;
		j += scnprintf(buf + j, (4 * DEBUG_BUF_SIZE) - j,
			"%s\n", dbg_data.buf[i]);
	}

	read_unlock_irqrestore(&dbg_data.lck, flags);

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, j);

	kfree(buf);

	return ret;
}

const struct file_operations data_timestamp_ops = {
	.read = show_timestamp,
};

static ssize_t data_bridge_read_stats(struct file *file, char __user *ubuf,
		size_t count, loff_t *ppos)
{
	struct data_bridge *dev;
	char *buf;
	int ret;
	int i;
	int temp = 0;

	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < ch_id; i++) {
		dev = __dev[i];
		if (!dev)
			continue;

		temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
				"\nName#%s dev %p\n"
				"pending tx urbs: %u\n"
				"tx urb drp cnt: %u\n"
				"to host: %lu\n"
				"to mdm: %lu\n"
				"tx throttled cnt: %u\n"
				"tx unthrottled cnt: %u\n"
				"rx throttled cnt: %u\n"
				"rx unthrottled cnt: %u\n"
				"rx done skb qlen: %u\n"
				"dev err: %d\n"
				"suspended: %d\n"
				"TX_HALT: %d\n"
				"RX_HALT: %d\n",
				dev->pdev->name, dev,
				atomic_read(&dev->pending_txurbs),
				dev->txurb_drp_cnt,
				dev->to_host,
				dev->to_modem,
				dev->tx_throttled_cnt,
				dev->tx_unthrottled_cnt,
				dev->rx_throttled_cnt,
				dev->rx_unthrottled_cnt,
				dev->rx_done.qlen,
				dev->err,
				test_bit(SUSPENDED, &dev->flags),
				test_bit(TX_HALT, &dev->flags),
				test_bit(RX_HALT, &dev->flags));
	}

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);

	kfree(buf);

	return ret;
}

static ssize_t data_bridge_reset_stats(struct file *file,
	const char __user *buf, size_t count, loff_t *ppos)
{
	struct data_bridge *dev;
	int i;

	for (i = 0; i < ch_id; i++) {
		dev = __dev[i];
		if (!dev)
			continue;

		dev->to_host = 0;
		dev->to_modem = 0;
		dev->txurb_drp_cnt = 0;
		dev->tx_throttled_cnt = 0;
		dev->tx_unthrottled_cnt = 0;
		dev->rx_throttled_cnt = 0;
		dev->rx_unthrottled_cnt = 0;
	}
	return count;
}

const struct file_operations data_stats_ops = {
	.read = data_bridge_read_stats,
	.write = data_bridge_reset_stats,
};

static struct dentry *data_dent;
static struct dentry *data_dfile_stats;
static struct dentry *data_dfile_tstamp;

static void data_bridge_debugfs_init(void)
{
	data_dent = debugfs_create_dir("data_hsic_bridge", 0);
	if (IS_ERR(data_dent))
		return;

	data_dfile_stats = debugfs_create_file("status", 0644, data_dent, 0,
			&data_stats_ops);
	if (!data_dfile_stats || IS_ERR(data_dfile_stats)) {
		debugfs_remove(data_dent);
		return;
	}

	data_dfile_tstamp = debugfs_create_file("timestamp", 0644, data_dent,
			0, &data_timestamp_ops);
	if (!data_dfile_tstamp || IS_ERR(data_dfile_tstamp))
		debugfs_remove(data_dent);
}

static void data_bridge_debugfs_exit(void)
{
	debugfs_remove(data_dfile_stats);
	debugfs_remove(data_dfile_tstamp);
	debugfs_remove(data_dent);
}

#else
static void data_bridge_debugfs_init(void) { }
static void data_bridge_debugfs_exit(void) { }
static void dbg_timestamp(char *event, struct sk_buff *skb)
{
	return;
}

static unsigned int get_timestamp(void)
{
	return 0;
}

#endif

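/*
 * USB probe: pick out the bulk in/out and interrupt in endpoints and set up
 * the data and control bridges for this interface.
 */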
static int __devinit
bridge_probe(struct usb_interface *iface, const struct usb_device_id *id)
{
	struct usb_host_endpoint *endpoint = NULL;
	struct usb_host_endpoint *bulk_in = NULL;
	struct usb_host_endpoint *bulk_out = NULL;
	struct usb_host_endpoint *int_in = NULL;
	struct usb_device *udev;
	int i;
	int status = 0;
	int numends;
	unsigned int iface_num;

	iface_num = iface->cur_altsetting->desc.bInterfaceNumber;

	if (iface->num_altsetting != 1) {
		err("%s invalid num_altsetting %u\n",
			__func__, iface->num_altsetting);
		return -EINVAL;
	}

	if (!test_bit(iface_num, &id->driver_info))
		return -ENODEV;

	udev = interface_to_usbdev(iface);
	usb_get_dev(udev);

	numends = iface->cur_altsetting->desc.bNumEndpoints;
	for (i = 0; i < numends; i++) {
		endpoint = iface->cur_altsetting->endpoint + i;
		if (!endpoint) {
			dev_err(&iface->dev, "%s: invalid endpoint %u\n",
				__func__, i);
			status = -EINVAL;
			goto out;
		}

		if (usb_endpoint_is_bulk_in(&endpoint->desc))
			bulk_in = endpoint;
		else if (usb_endpoint_is_bulk_out(&endpoint->desc))
			bulk_out = endpoint;
		else if (usb_endpoint_is_int_in(&endpoint->desc))
			int_in = endpoint;
	}

	if (!bulk_in || !bulk_out || !int_in) {
		dev_err(&iface->dev, "%s: invalid endpoints\n", __func__);
		status = -EINVAL;
		goto out;
	}

	status = data_bridge_probe(iface, bulk_in, bulk_out, ch_id);
	if (status < 0) {
		dev_err(&iface->dev, "data_bridge_probe failed %d\n", status);
		goto out;
	}

	status = ctrl_bridge_probe(iface, int_in, ch_id);
	if (status < 0) {
		dev_err(&iface->dev, "ctrl_bridge_probe failed %d\n", status);
		goto error;
	}

	ch_id++;

	return 0;

error:
	platform_device_put(__dev[ch_id]->pdev);
	free_rx_urbs(__dev[ch_id]);
	usb_set_intfdata(iface, NULL);
out:
	usb_put_dev(udev);

	return status;
}

static void bridge_disconnect(struct usb_interface *intf)
{
	struct data_bridge *dev = usb_get_intfdata(intf);

	if (!dev) {
		err("%s: data device not found\n", __func__);
		return;
	}

	ch_id--;
	ctrl_bridge_disconnect(dev->id);
	platform_device_unregister(dev->pdev);
	usb_set_intfdata(intf, NULL);

	free_rx_urbs(dev);

	usb_put_dev(dev->udev);
}

/* bit position represents interface number */
#define PID9001_IFACE_MASK	0xC
#define PID9034_IFACE_MASK	0xC
#define PID9048_IFACE_MASK	0x18
#define PID904C_IFACE_MASK	0x28
#define PID9075_IFACE_MASK	0x28

static const struct usb_device_id bridge_ids[] = {
	{ USB_DEVICE(0x5c6, 0x9001),
	.driver_info = PID9001_IFACE_MASK,
	},
	{ USB_DEVICE(0x5c6, 0x9034),
	.driver_info = PID9034_IFACE_MASK,
	},
	{ USB_DEVICE(0x5c6, 0x9048),
	.driver_info = PID9048_IFACE_MASK,
	},
	{ USB_DEVICE(0x5c6, 0x904c),
	.driver_info = PID904C_IFACE_MASK,
	},
	{ USB_DEVICE(0x5c6, 0x9075),
	.driver_info = PID9075_IFACE_MASK,
	},

	{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, bridge_ids);

static struct usb_driver bridge_driver = {
	.name = "mdm_bridge",
	.probe = bridge_probe,
	.disconnect = bridge_disconnect,
	.id_table = bridge_ids,
	.suspend = bridge_suspend,
	.resume = bridge_resume,
	.supports_autosuspend = 1,
};

static int __init bridge_init(void)
{
	struct data_bridge *dev;
	int ret;
	int i = 0;

	ret = ctrl_bridge_init();
	if (ret)
		return ret;

	bridge_wq = create_singlethread_workqueue("mdm_bridge");
	if (!bridge_wq) {
		pr_err("%s: Unable to create workqueue:bridge\n", __func__);
		ret = -ENOMEM;
		goto free_ctrl;
	}

	for (i = 0; i < MAX_BRIDGE_DEVICES; i++) {

		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		if (!dev) {
			err("%s: unable to allocate dev\n", __func__);
			ret = -ENOMEM;
			goto error;
		}

		dev->wq = bridge_wq;

		init_usb_anchor(&dev->tx_active);
		init_usb_anchor(&dev->rx_active);
		init_usb_anchor(&dev->delayed);

		INIT_LIST_HEAD(&dev->rx_idle);

		skb_queue_head_init(&dev->rx_done);

		INIT_WORK(&dev->kevent, defer_kevent);
		INIT_WORK(&dev->process_rx_w, data_bridge_process_rx);

		__dev[i] = dev;
	}

	ret = usb_register(&bridge_driver);
	if (ret) {
		err("%s: unable to register mdm_bridge driver", __func__);
		goto error;
	}

	data_bridge_debugfs_init();

	return 0;

error:
	while (--i >= 0) {
		kfree(__dev[i]);
		__dev[i] = NULL;
	}
	destroy_workqueue(bridge_wq);
free_ctrl:
	ctrl_bridge_exit();
	return ret;
}

static void __exit bridge_exit(void)
{
	int i;

	usb_deregister(&bridge_driver);
	data_bridge_debugfs_exit();
	destroy_workqueue(bridge_wq);

	for (i = 0; i < MAX_BRIDGE_DEVICES; i++) {
		kfree(__dev[i]);
		__dev[i] = NULL;
	}

	ctrl_bridge_exit();
}

module_init(bridge_init);
module_exit(bridge_exit);

MODULE_DESCRIPTION("Qualcomm modem data bridge driver");
MODULE_LICENSE("GPL v2");