/* Copyright (c) 2011-2013, Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/termios.h>
#include <mach/msm_smd.h>
#include <linux/netdevice.h>
#include <mach/bam_dmux.h>
#include <linux/debugfs.h>
#include <linux/bitops.h>

#include <mach/usb_gadget_xport.h>
#include <linux/usb/msm_hsusb.h>
#include <mach/usb_bam.h>

#include "u_rmnet.h"

#define BAM_N_PORTS	1
#define BAM2BAM_N_PORTS	3

static struct workqueue_struct *gbam_wq;
static int n_bam_ports;
static int n_bam2bam_ports;
static unsigned n_tx_req_queued;
static unsigned bam_ch_ids[] = { 8 };

static const char *bam_ch_names[] = { "bam_dmux_ch_8" };

#define BAM_PENDING_LIMIT			220
#define BAM_MUX_TX_PKT_DROP_THRESHOLD		1000
#define BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD		500
#define BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD		300
#define BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT	1

#define BAM_MUX_HDR				8

#define BAM_MUX_RX_Q_SIZE			16
#define BAM_MUX_TX_Q_SIZE			200
#define BAM_MUX_RX_REQ_SIZE			2048	/* Must be 1KB aligned */

#define DL_INTR_THRESHOLD			20

unsigned int bam_mux_tx_pkt_drop_thld = BAM_MUX_TX_PKT_DROP_THRESHOLD;
module_param(bam_mux_tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_fctrl_en_thld = BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD;
module_param(bam_mux_rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_fctrl_support = BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT;
module_param(bam_mux_rx_fctrl_support, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_fctrl_dis_thld = BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD;
module_param(bam_mux_rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_tx_q_size = BAM_MUX_TX_Q_SIZE;
module_param(bam_mux_tx_q_size, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_q_size = BAM_MUX_RX_Q_SIZE;
module_param(bam_mux_rx_q_size, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_req_size = BAM_MUX_RX_REQ_SIZE;
module_param(bam_mux_rx_req_size, uint, S_IRUGO | S_IWUSR);

unsigned int dl_intr_threshold = DL_INTR_THRESHOLD;
module_param(dl_intr_threshold, uint, S_IRUGO | S_IWUSR);

#define BAM_CH_OPENED	BIT(0)
#define BAM_CH_READY	BIT(1)

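/*
 * Per-channel state: idle USB request pools and skb queues for each
 * direction, the BAM-DMUX channel id, BAM2BAM/IPA pipe bookkeeping
 * (pipe indices, connection index, ipa_params) and traffic statistics.
 */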
struct bam_ch_info {
	unsigned long		flags;
	unsigned		id;

	struct list_head	tx_idle;
	struct sk_buff_head	tx_skb_q;

	struct list_head	rx_idle;
	struct sk_buff_head	rx_skb_q;

	struct gbam_port	*port;
	struct work_struct	write_tobam_w;
	struct work_struct	write_tohost_w;

	struct usb_request	*rx_req;
	struct usb_request	*tx_req;

	u32			src_pipe_idx;
	u32			dst_pipe_idx;
	u8			connection_idx;
	enum transport_type	trans;
	struct usb_bam_connect_ipa_params ipa_params;

	/* stats */
	unsigned int		pending_with_bam;
	unsigned int		tohost_drp_cnt;
	unsigned int		tomodem_drp_cnt;
	unsigned int		tx_len;
	unsigned int		rx_len;
	unsigned long		to_modem;
	unsigned long		to_host;
};

struct gbam_port {
	unsigned		port_num;
	spinlock_t		port_lock_ul;
	spinlock_t		port_lock_dl;

	struct grmnet		*port_usb;
	struct grmnet		*gr;

	struct bam_ch_info	data_ch;

	struct work_struct	connect_w;
	struct work_struct	disconnect_w;
};

static struct bam_portmaster {
	struct gbam_port *port;
	struct platform_driver pdrv;
} bam_ports[BAM_N_PORTS];

struct gbam_port *bam2bam_ports[BAM2BAM_N_PORTS];
static void gbam_start_rx(struct gbam_port *port);
static void gbam_start_endless_rx(struct gbam_port *port);
static void gbam_start_endless_tx(struct gbam_port *port);
static int gbam_peer_reset_cb(void *param);

/*---------------misc functions---------------- */
static void gbam_free_requests(struct usb_ep *ep, struct list_head *head)
{
	struct usb_request	*req;

	while (!list_empty(head)) {
		req = list_entry(head->next, struct usb_request, list);
		list_del(&req->list);
		usb_ep_free_request(ep, req);
	}
}

static int gbam_alloc_requests(struct usb_ep *ep, struct list_head *head,
		int num,
		void (*cb)(struct usb_ep *ep, struct usb_request *),
		gfp_t flags)
{
	int i;
	struct usb_request *req;

	pr_debug("%s: ep:%p head:%p num:%d cb:%p", __func__,
			ep, head, num, cb);

	for (i = 0; i < num; i++) {
		req = usb_ep_alloc_request(ep, flags);
		if (!req) {
			pr_debug("%s: req allocated:%d\n", __func__, i);
			return list_empty(head) ? -ENOMEM : 0;
		}
		req->complete = cb;
		list_add(&req->list, head);
	}

	return 0;
}
/*--------------------------------------------- */

/*------------data_path----------------------------*/
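/*
 * Drain tx_skb_q toward the USB host: pair each pending skb with an idle
 * IN request and queue it on the IN endpoint. Completion interrupts are
 * requested only every dl_intr_threshold requests to reduce IRQ load.
 */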
static void gbam_write_data_tohost(struct gbam_port *port)
{
	unsigned long		flags;
	struct bam_ch_info	*d = &port->data_ch;
	struct sk_buff		*skb;
	int			ret;
	struct usb_request	*req;
	struct usb_ep		*ep;

	spin_lock_irqsave(&port->port_lock_dl, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock_dl, flags);
		return;
	}

	ep = port->port_usb->in;

	while (!list_empty(&d->tx_idle)) {
		skb = __skb_dequeue(&d->tx_skb_q);
		if (!skb) {
			spin_unlock_irqrestore(&port->port_lock_dl, flags);
			return;
		}
		req = list_first_entry(&d->tx_idle,
				struct usb_request, list);
		req->context = skb;
		req->buf = skb->data;
		req->length = skb->len;
		n_tx_req_queued++;
		if (n_tx_req_queued == dl_intr_threshold) {
			req->no_interrupt = 0;
			n_tx_req_queued = 0;
		} else {
			req->no_interrupt = 1;
		}

		list_del(&req->list);

		spin_unlock(&port->port_lock_dl);
		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
		spin_lock(&port->port_lock_dl);
		if (ret) {
			pr_err("%s: usb epIn failed with %d\n", __func__, ret);
			list_add(&req->list, &d->tx_idle);
			dev_kfree_skb_any(skb);
			break;
		}
		d->to_host++;
	}
	spin_unlock_irqrestore(&port->port_lock_dl, flags);
}

static void gbam_write_data_tohost_w(struct work_struct *w)
{
	struct bam_ch_info	*d;
	struct gbam_port	*port;

	d = container_of(w, struct bam_ch_info, write_tohost_w);
	port = d->port;

	gbam_write_data_tohost(port);
}

void gbam_data_recv_cb(void *p, struct sk_buff *skb)
{
	struct gbam_port	*port = p;
	struct bam_ch_info	*d = &port->data_ch;
	unsigned long		flags;

	if (!skb)
		return;

	pr_debug("%s: p:%p#%d d:%p skb_len:%d\n", __func__,
			port, port->port_num, d, skb->len);

	spin_lock_irqsave(&port->port_lock_dl, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock_dl, flags);
		dev_kfree_skb_any(skb);
		return;
	}

	if (d->tx_skb_q.qlen > bam_mux_tx_pkt_drop_thld) {
		d->tohost_drp_cnt++;
		if (printk_ratelimit())
			pr_err("%s: tx pkt dropped: tx_drop_cnt:%u\n",
					__func__, d->tohost_drp_cnt);
		spin_unlock_irqrestore(&port->port_lock_dl, flags);
		dev_kfree_skb_any(skb);
		return;
	}

	__skb_queue_tail(&d->tx_skb_q, skb);
	spin_unlock_irqrestore(&port->port_lock_dl, flags);

	gbam_write_data_tohost(port);
}

void gbam_data_write_done(void *p, struct sk_buff *skb)
{
	struct gbam_port	*port = p;
	struct bam_ch_info	*d = &port->data_ch;
	unsigned long		flags;

	if (!skb)
		return;

	dev_kfree_skb_any(skb);

	spin_lock_irqsave(&port->port_lock_ul, flags);

	d->pending_with_bam--;

	pr_debug("%s: port:%p d:%p tom:%lu pbam:%u, pno:%d\n", __func__,
			port, d, d->to_modem,
			d->pending_with_bam, port->port_num);

	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	queue_work(gbam_wq, &d->write_tobam_w);
}

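/*
 * Worker that pushes queued UL skbs to the modem over BAM-DMUX, keeping at
 * most BAM_PENDING_LIMIT writes outstanding. Once the backlog drops below
 * the flow-control disable threshold, USB RX is restarted.
 */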
static void gbam_data_write_tobam(struct work_struct *w)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	struct sk_buff		*skb;
	unsigned long		flags;
	int			ret;
	int			qlen;

	d = container_of(w, struct bam_ch_info, write_tobam_w);
	port = d->port;

	spin_lock_irqsave(&port->port_lock_ul, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}

	while (d->pending_with_bam < BAM_PENDING_LIMIT) {
		skb = __skb_dequeue(&d->rx_skb_q);
		if (!skb)
			break;

		d->pending_with_bam++;
		d->to_modem++;

		pr_debug("%s: port:%p d:%p tom:%lu pbam:%u pno:%d\n", __func__,
				port, d, d->to_modem, d->pending_with_bam,
				port->port_num);

		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		ret = msm_bam_dmux_write(d->id, skb);
		spin_lock_irqsave(&port->port_lock_ul, flags);
		if (ret) {
			pr_debug("%s: write error:%d\n", __func__, ret);
			d->pending_with_bam--;
			d->to_modem--;
			d->tomodem_drp_cnt++;
			dev_kfree_skb_any(skb);
			break;
		}
	}

	qlen = d->rx_skb_q.qlen;

	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	if (qlen < BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD)
		gbam_start_rx(port);
}
/*-------------------------------------------------------------*/

static void gbam_epin_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gbam_port	*port = ep->driver_data;
	struct bam_ch_info	*d;
	struct sk_buff		*skb = req->context;
	int			status = req->status;

	switch (status) {
	case 0:
		/* successful completion */
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		break;
	default:
		pr_err("%s: data tx ep error %d\n",
				__func__, status);
		break;
	}

	dev_kfree_skb_any(skb);

	if (!port)
		return;

	spin_lock(&port->port_lock_dl);
	d = &port->data_ch;
	list_add_tail(&req->list, &d->tx_idle);
	spin_unlock(&port->port_lock_dl);

	queue_work(gbam_wq, &d->write_tohost_w);
}

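/*
 * OUT (host-to-device) completion: hand the filled skb to the UL worker.
 * When flow control is enabled and the UL backlog is above the enable
 * threshold, the request is parked on rx_idle instead of being re-armed;
 * otherwise it is re-queued with a freshly allocated skb.
 */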
static void
gbam_epout_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gbam_port	*port = ep->driver_data;
	struct bam_ch_info	*d = &port->data_ch;
	struct sk_buff		*skb = req->context;
	int			status = req->status;
	int			queue = 0;

	switch (status) {
	case 0:
		skb_put(skb, req->actual);
		queue = 1;
		break;
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* cable disconnection */
		dev_kfree_skb_any(skb);
		req->buf = 0;
		usb_ep_free_request(ep, req);
		return;
	default:
		if (printk_ratelimit())
			pr_err("%s: %s response error %d, %d/%d\n",
				__func__, ep->name, status,
				req->actual, req->length);
		dev_kfree_skb_any(skb);
		break;
	}

	spin_lock(&port->port_lock_ul);
	if (queue) {
		__skb_queue_tail(&d->rx_skb_q, skb);
		queue_work(gbam_wq, &d->write_tobam_w);
	}

	/* TODO: Handle flow control gracefully by having
	 * a callback mechanism from the bam driver.
	 */
	if (bam_mux_rx_fctrl_support &&
		d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld) {

		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock_ul);
		return;
	}
	spin_unlock(&port->port_lock_ul);

	skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
	if (!skb) {
		spin_lock(&port->port_lock_ul);
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock_ul);
		return;
	}
	skb_reserve(skb, BAM_MUX_HDR);

	req->buf = skb->data;
	req->length = bam_mux_rx_req_size;
	req->context = skb;

	status = usb_ep_queue(ep, req, GFP_ATOMIC);
	if (status) {
		dev_kfree_skb_any(skb);

		if (printk_ratelimit())
			pr_err("%s: data rx enqueue err %d\n",
					__func__, status);

		spin_lock(&port->port_lock_ul);
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock_ul);
	}
}

static void gbam_endless_rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	int status = req->status;

	pr_debug("%s status: %d\n", __func__, status);
}

static void gbam_endless_tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	int status = req->status;

	pr_debug("%s status: %d\n", __func__, status);
}

static void gbam_start_rx(struct gbam_port *port)
{
	struct usb_request	*req;
	struct bam_ch_info	*d;
	struct usb_ep		*ep;
	unsigned long		flags;
	int			ret;
	struct sk_buff		*skb;

	spin_lock_irqsave(&port->port_lock_ul, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}

	d = &port->data_ch;
	ep = port->port_usb->out;

	while (port->port_usb && !list_empty(&d->rx_idle)) {

		if (bam_mux_rx_fctrl_support &&
			d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld)
			break;

		req = list_first_entry(&d->rx_idle, struct usb_request, list);

		skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
		if (!skb)
			break;
		skb_reserve(skb, BAM_MUX_HDR);

		list_del(&req->list);
		req->buf = skb->data;
		req->length = bam_mux_rx_req_size;
		req->context = skb;

		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
		spin_lock_irqsave(&port->port_lock_ul, flags);
		if (ret) {
			dev_kfree_skb_any(skb);

			if (printk_ratelimit())
				pr_err("%s: rx queue failed %d\n",
						__func__, ret);

			if (port->port_usb)
				list_add(&req->list, &d->rx_idle);
			else
				usb_ep_free_request(ep, req);
			break;
		}
	}
	spin_unlock_irqrestore(&port->port_lock_ul, flags);
}

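/*
 * For BAM2BAM/IPA transports the data moves directly between the USB BAM
 * pipe and the peer, so each direction only needs a single zero-length
 * "endless" request to keep the UDC pipe armed.
 */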
static void gbam_start_endless_rx(struct gbam_port *port)
{
	struct bam_ch_info *d = &port->data_ch;
	int status;

	if (!port->port_usb)
		return;

	status = usb_ep_queue(port->port_usb->out, d->rx_req, GFP_ATOMIC);
	if (status)
		pr_err("%s: error enqueuing transfer, %d\n", __func__, status);
}

static void gbam_start_endless_tx(struct gbam_port *port)
{
	struct bam_ch_info *d = &port->data_ch;
	int status;

	if (!port->port_usb)
		return;

	status = usb_ep_queue(port->port_usb->in, d->tx_req, GFP_ATOMIC);
	if (status)
		pr_err("%s: error enqueuing transfer, %d\n", __func__, status);
}

static void gbam_start_io(struct gbam_port *port)
{
	unsigned long		flags;
	struct usb_ep		*ep;
	int			ret;
	struct bam_ch_info	*d;

	pr_debug("%s: port:%p\n", __func__, port);

	spin_lock_irqsave(&port->port_lock_ul, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}

	d = &port->data_ch;
	ep = port->port_usb->out;
	ret = gbam_alloc_requests(ep, &d->rx_idle, bam_mux_rx_q_size,
			gbam_epout_complete, GFP_ATOMIC);
	if (ret) {
		pr_err("%s: rx req allocation failed\n", __func__);
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}

	spin_unlock_irqrestore(&port->port_lock_ul, flags);
	spin_lock_irqsave(&port->port_lock_dl, flags);
	if (!port->port_usb) {
		gbam_free_requests(ep, &d->rx_idle);
		spin_unlock_irqrestore(&port->port_lock_dl, flags);
		return;
	}
	ep = port->port_usb->in;
	ret = gbam_alloc_requests(ep, &d->tx_idle, bam_mux_tx_q_size,
			gbam_epin_complete, GFP_ATOMIC);
	if (ret) {
		pr_err("%s: tx req allocation failed\n", __func__);
		gbam_free_requests(ep, &d->rx_idle);
		spin_unlock_irqrestore(&port->port_lock_dl, flags);
		return;
	}

	spin_unlock_irqrestore(&port->port_lock_dl, flags);

	/* queue out requests */
	gbam_start_rx(port);
}

static void gbam_notify(void *p, int event, unsigned long data)
{
	switch (event) {
	case BAM_DMUX_RECEIVE:
		gbam_data_recv_cb(p, (struct sk_buff *)(data));
		break;
	case BAM_DMUX_WRITE_DONE:
		gbam_data_write_done(p, (struct sk_buff *)(data));
		break;
	}
}

static void gbam_free_buffers(struct gbam_port *port)
{
	struct sk_buff		*skb;
	unsigned long		flags;
	struct bam_ch_info	*d;

	spin_lock_irqsave(&port->port_lock_ul, flags);
	spin_lock(&port->port_lock_dl);

	if (!port || !port->port_usb)
		goto free_buf_out;

	d = &port->data_ch;

	gbam_free_requests(port->port_usb->in, &d->tx_idle);
	gbam_free_requests(port->port_usb->out, &d->rx_idle);

	while ((skb = __skb_dequeue(&d->tx_skb_q)))
		dev_kfree_skb_any(skb);

	while ((skb = __skb_dequeue(&d->rx_skb_q)))
		dev_kfree_skb_any(skb);

free_buf_out:
	spin_unlock(&port->port_lock_dl);
	spin_unlock_irqrestore(&port->port_lock_ul, flags);
}

static void gbam_disconnect_work(struct work_struct *w)
{
	struct gbam_port *port =
			container_of(w, struct gbam_port, disconnect_w);
	struct bam_ch_info *d = &port->data_ch;

	if (!test_bit(BAM_CH_OPENED, &d->flags))
		return;

	msm_bam_dmux_close(d->id);
	clear_bit(BAM_CH_OPENED, &d->flags);
}

static void gbam2bam_disconnect_work(struct work_struct *w)
{
	struct gbam_port *port =
			container_of(w, struct gbam_port, disconnect_w);
	struct bam_ch_info *d = &port->data_ch;
	int ret;

	if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
		ret = usb_bam_disconnect_ipa(d->connection_idx, &d->ipa_params);
		if (ret)
			pr_err("%s: usb_bam_disconnect_ipa failed: err:%d\n",
				__func__, ret);
		teth_bridge_disconnect();
	}
}

static void gbam_connect_work(struct work_struct *w)
{
	struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
	struct bam_ch_info *d = &port->data_ch;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&port->port_lock_ul, flags);
	spin_lock(&port->port_lock_dl);
	if (!port->port_usb) {
		spin_unlock(&port->port_lock_dl);
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}
	spin_unlock(&port->port_lock_dl);
	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	if (!test_bit(BAM_CH_READY, &d->flags))
		return;

	ret = msm_bam_dmux_open(d->id, port, gbam_notify);
	if (ret) {
		pr_err("%s: unable to open bam ch:%d err:%d\n",
				__func__, d->id, ret);
		return;
	}
	set_bit(BAM_CH_OPENED, &d->flags);

	gbam_start_io(port);

	pr_debug("%s: done\n", __func__);
}

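/*
 * Bring up the hardware-accelerated path: plain BAM2BAM connects the USB
 * BAM pipes directly, while BAM2BAM_IPA routes both directions through the
 * IPA and then attaches the tethering bridge. Afterwards the endless
 * requests are allocated and queued, and port 0 registers for peer reset
 * notifications.
 */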
static void gbam2bam_connect_work(struct work_struct *w)
{
	struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
	struct teth_bridge_connect_params connect_params;
	struct bam_ch_info *d = &port->data_ch;
	u32 sps_params;
	ipa_notify_cb usb_notify_cb;
	void *priv;
	int ret;

	if (d->trans == USB_GADGET_XPORT_BAM2BAM) {
		ret = usb_bam_connect(d->connection_idx, &d->src_pipe_idx,
			&d->dst_pipe_idx);
		if (ret) {
			pr_err("%s: usb_bam_connect failed: err:%d\n",
				__func__, ret);
			return;
		}
	} else if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
		ret = teth_bridge_init(&usb_notify_cb, &priv);
		if (ret) {
			pr_err("%s:teth_bridge_init() failed\n", __func__);
			return;
		}
		d->ipa_params.notify = usb_notify_cb;
		d->ipa_params.priv = priv;
		d->ipa_params.ipa_ep_cfg.mode.mode = IPA_BASIC;

		d->ipa_params.client = IPA_CLIENT_USB_CONS;
		d->ipa_params.dir = PEER_PERIPHERAL_TO_USB;
		ret = usb_bam_connect_ipa(&d->ipa_params);
		if (ret) {
			pr_err("%s: usb_bam_connect_ipa failed: err:%d\n",
				__func__, ret);
			return;
		}

		d->ipa_params.client = IPA_CLIENT_USB_PROD;
		d->ipa_params.dir = USB_TO_PEER_PERIPHERAL;
		ret = usb_bam_connect_ipa(&d->ipa_params);
		if (ret) {
			pr_err("%s: usb_bam_connect_ipa failed: err:%d\n",
				__func__, ret);
			return;
		}

		connect_params.ipa_usb_pipe_hdl = d->ipa_params.prod_clnt_hdl;
		connect_params.usb_ipa_pipe_hdl = d->ipa_params.cons_clnt_hdl;
		connect_params.tethering_mode = TETH_TETHERING_MODE_RMNET;
		ret = teth_bridge_connect(&connect_params);
		if (ret) {
			pr_err("%s:teth_bridge_connect() failed\n", __func__);
			return;
		}
	}

	d->rx_req = usb_ep_alloc_request(port->port_usb->out, GFP_KERNEL);
	if (!d->rx_req)
		return;

	d->rx_req->context = port;
	d->rx_req->complete = gbam_endless_rx_complete;
	d->rx_req->length = 0;
	sps_params = (MSM_SPS_MODE | d->src_pipe_idx |
				MSM_VENDOR_ID) & ~MSM_IS_FINITE_TRANSFER;
	d->rx_req->udc_priv = sps_params;
	d->tx_req = usb_ep_alloc_request(port->port_usb->in, GFP_KERNEL);
	if (!d->tx_req)
		return;

	d->tx_req->context = port;
	d->tx_req->complete = gbam_endless_tx_complete;
	d->tx_req->length = 0;
	sps_params = (MSM_SPS_MODE | d->dst_pipe_idx |
				MSM_VENDOR_ID) & ~MSM_IS_FINITE_TRANSFER;
	d->tx_req->udc_priv = sps_params;

	/* queue in & out requests */
	gbam_start_endless_rx(port);
	gbam_start_endless_tx(port);

	if (d->trans == USB_GADGET_XPORT_BAM2BAM && port->port_num == 0) {
		/* Register for peer reset callback */
		usb_bam_register_peer_reset_cb(d->connection_idx,
			gbam_peer_reset_cb, port);

		ret = usb_bam_client_ready(true);
		if (ret) {
			pr_err("%s: usb_bam_client_ready failed: err:%d\n",
				__func__, ret);
			return;
		}
	}

	pr_debug("%s: done\n", __func__);
}

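/*
 * Invoked when the peer resets its BAM: quiesce the endpoints, reset the
 * USB BAM block, then re-enable the endpoints and re-queue the endless
 * transfers.
 */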
static int gbam_peer_reset_cb(void *param)
{
	struct gbam_port *port = (struct gbam_port *)param;
	struct bam_ch_info *d;
	struct f_rmnet *dev;
	struct usb_gadget *gadget;
	int ret;
	bool reenable_eps = false;

	dev = port_to_rmnet(port->gr);
	d = &port->data_ch;

	gadget = dev->cdev->gadget;

	pr_debug("%s: reset by peer\n", __func__);

	/* Disable the relevant EPs if currently EPs are enabled */
	if (port->port_usb && port->port_usb->in &&
			port->port_usb->in->driver_data) {
		usb_ep_disable(port->port_usb->out);
		usb_ep_disable(port->port_usb->in);

		port->port_usb->in->driver_data = NULL;
		port->port_usb->out->driver_data = NULL;
		reenable_eps = true;
	}

	/* Disable BAM */
	msm_hw_bam_disable(1);

	/* Reset BAM */
	ret = usb_bam_reset();
	if (ret) {
		pr_err("%s: BAM reset failed %d\n", __func__, ret);
		goto reenable_eps;
	}

	/* Enable BAM */
	msm_hw_bam_disable(0);

reenable_eps:
	/* Re-Enable the relevant EPs, if EPs were originally enabled */
	if (reenable_eps) {
		ret = usb_ep_enable(port->port_usb->in);
		if (ret) {
			pr_err("%s: usb_ep_enable failed eptype:IN ep:%p",
				__func__, port->port_usb->in);
			return ret;
		}
		port->port_usb->in->driver_data = port;

		ret = usb_ep_enable(port->port_usb->out);
		if (ret) {
			pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p",
				__func__, port->port_usb->out);
			port->port_usb->in->driver_data = 0;
			return ret;
		}
		port->port_usb->out->driver_data = port;

		gbam_start_endless_rx(port);
		gbam_start_endless_tx(port);
	}

	/* Unregister the peer reset callback */
	if (d->trans == USB_GADGET_XPORT_BAM2BAM && port->port_num == 0)
		usb_bam_register_peer_reset_cb(d->connection_idx, NULL, NULL);

	return 0;
}

/* BAM data channel ready, allow attempt to open */
static int gbam_data_ch_probe(struct platform_device *pdev)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	int i;
	unsigned long flags;

	pr_debug("%s: name:%s\n", __func__, pdev->name);

	for (i = 0; i < n_bam_ports; i++) {
		port = bam_ports[i].port;
		d = &port->data_ch;

		if (!strncmp(bam_ch_names[i], pdev->name,
					BAM_DMUX_CH_NAME_MAX_LEN)) {
			set_bit(BAM_CH_READY, &d->flags);

			/* if usb is online, try opening bam_ch */
			spin_lock_irqsave(&port->port_lock_ul, flags);
			spin_lock(&port->port_lock_dl);
			if (port->port_usb)
				queue_work(gbam_wq, &port->connect_w);
			spin_unlock(&port->port_lock_dl);
			spin_unlock_irqrestore(&port->port_lock_ul, flags);

			break;
		}
	}

	return 0;
}

/* BAM data channel went inactive, so close it */
static int gbam_data_ch_remove(struct platform_device *pdev)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	struct usb_ep		*ep_in = NULL;
	struct usb_ep		*ep_out = NULL;
	unsigned long		flags;
	int			i;

	pr_debug("%s: name:%s\n", __func__, pdev->name);

	for (i = 0; i < n_bam_ports; i++) {
		if (!strncmp(bam_ch_names[i], pdev->name,
					BAM_DMUX_CH_NAME_MAX_LEN)) {
			port = bam_ports[i].port;
			d = &port->data_ch;

			spin_lock_irqsave(&port->port_lock_ul, flags);
			spin_lock(&port->port_lock_dl);
			if (port->port_usb) {
				ep_in = port->port_usb->in;
				ep_out = port->port_usb->out;
			}
			spin_unlock(&port->port_lock_dl);
			spin_unlock_irqrestore(&port->port_lock_ul, flags);

			if (ep_in)
				usb_ep_fifo_flush(ep_in);
			if (ep_out)
				usb_ep_fifo_flush(ep_out);

			gbam_free_buffers(port);

			msm_bam_dmux_close(d->id);

			/* bam dmux will free all pending skbs */
			d->pending_with_bam = 0;

			clear_bit(BAM_CH_READY, &d->flags);
			clear_bit(BAM_CH_OPENED, &d->flags);
		}
	}

	return 0;
}

static void gbam_port_free(int portno)
{
	struct gbam_port *port = bam_ports[portno].port;
	struct platform_driver *pdrv = &bam_ports[portno].pdrv;

	if (port) {
		kfree(port);
		platform_driver_unregister(pdrv);
	}
}

static void gbam2bam_port_free(int portno)
{
	struct gbam_port *port = bam2bam_ports[portno];

	kfree(port);
}

static int gbam_port_alloc(int portno)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	struct platform_driver	*pdrv;

	port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->port_num = portno;

	/* port initialization */
	spin_lock_init(&port->port_lock_ul);
	spin_lock_init(&port->port_lock_dl);
	INIT_WORK(&port->connect_w, gbam_connect_work);
	INIT_WORK(&port->disconnect_w, gbam_disconnect_work);

	/* data ch */
	d = &port->data_ch;
	d->port = port;
	INIT_LIST_HEAD(&d->tx_idle);
	INIT_LIST_HEAD(&d->rx_idle);
	INIT_WORK(&d->write_tobam_w, gbam_data_write_tobam);
	INIT_WORK(&d->write_tohost_w, gbam_write_data_tohost_w);
	skb_queue_head_init(&d->tx_skb_q);
	skb_queue_head_init(&d->rx_skb_q);
	d->id = bam_ch_ids[portno];

	bam_ports[portno].port = port;

	pdrv = &bam_ports[portno].pdrv;
	pdrv->probe = gbam_data_ch_probe;
	pdrv->remove = gbam_data_ch_remove;
	pdrv->driver.name = bam_ch_names[portno];
	pdrv->driver.owner = THIS_MODULE;

	platform_driver_register(pdrv);
	pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);

	return 0;
}

static int gbam2bam_port_alloc(int portno)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;

	port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->port_num = portno;

	/* port initialization */
	spin_lock_init(&port->port_lock_ul);
	spin_lock_init(&port->port_lock_dl);

	INIT_WORK(&port->connect_w, gbam2bam_connect_work);
	INIT_WORK(&port->disconnect_w, gbam2bam_disconnect_work);

	/* data ch */
	d = &port->data_ch;
	d->port = port;
	bam2bam_ports[portno] = port;

	pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);

	return 0;
}

#if defined(CONFIG_DEBUG_FS)
#define DEBUG_BUF_SIZE 1024
static ssize_t gbam_read_stats(struct file *file, char __user *ubuf,
		size_t count, loff_t *ppos)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	char			*buf;
	unsigned long		flags;
	int			ret;
	int			i;
	int			temp = 0;

	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < n_bam_ports; i++) {
		port = bam_ports[i].port;
		if (!port)
			continue;
		spin_lock_irqsave(&port->port_lock_ul, flags);
		spin_lock(&port->port_lock_dl);

		d = &port->data_ch;

		temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
				"#PORT:%d port:%p data_ch:%p#\n"
				"dpkts_to_usbhost: %lu\n"
				"dpkts_to_modem: %lu\n"
				"dpkts_pwith_bam: %u\n"
				"to_usbhost_dcnt: %u\n"
				"tomodem__dcnt: %u\n"
				"tx_buf_len: %u\n"
				"rx_buf_len: %u\n"
				"data_ch_open: %d\n"
				"data_ch_ready: %d\n",
				i, port, &port->data_ch,
				d->to_host, d->to_modem,
				d->pending_with_bam,
				d->tohost_drp_cnt, d->tomodem_drp_cnt,
				d->tx_skb_q.qlen, d->rx_skb_q.qlen,
				test_bit(BAM_CH_OPENED, &d->flags),
				test_bit(BAM_CH_READY, &d->flags));

		spin_unlock(&port->port_lock_dl);
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
	}

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);

	kfree(buf);

	return ret;
}

static ssize_t gbam_reset_stats(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	int			i;
	unsigned long		flags;

	for (i = 0; i < n_bam_ports; i++) {
		port = bam_ports[i].port;
		if (!port)
			continue;

		spin_lock_irqsave(&port->port_lock_ul, flags);
		spin_lock(&port->port_lock_dl);

		d = &port->data_ch;

		d->to_host = 0;
		d->to_modem = 0;
		d->pending_with_bam = 0;
		d->tohost_drp_cnt = 0;
		d->tomodem_drp_cnt = 0;

		spin_unlock(&port->port_lock_dl);
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
	}
	return count;
}

const struct file_operations gbam_stats_ops = {
	.read = gbam_read_stats,
	.write = gbam_reset_stats,
};

static void gbam_debugfs_init(void)
{
	struct dentry *dent;
	struct dentry *dfile;

	dent = debugfs_create_dir("usb_rmnet", 0);
	if (IS_ERR(dent))
		return;

	/* TODO: Implement cleanup function to remove created file */
	dfile = debugfs_create_file("status", 0444, dent, 0, &gbam_stats_ops);
	if (!dfile || IS_ERR(dfile))
		debugfs_remove(dent);
}
#else
static void gbam_debugfs_init(void) { }
#endif

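/*
 * Tear down a port on cable disconnect / function disable: clear port_usb
 * under both locks, disable the endpoints, and defer transport-specific
 * cleanup to the disconnect worker (BAM2BAM port 0 instead notifies the
 * USB BAM driver that the client is no longer ready).
 */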
void gbam_disconnect(struct grmnet *gr, u8 port_num, enum transport_type trans)
{
	struct gbam_port	*port;
	unsigned long		flags;
	struct bam_ch_info	*d;

	pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);

	if (trans == USB_GADGET_XPORT_BAM &&
		port_num >= n_bam_ports) {
		pr_err("%s: invalid bam portno#%d\n",
			__func__, port_num);
		return;
	}

	if ((trans == USB_GADGET_XPORT_BAM2BAM ||
		trans == USB_GADGET_XPORT_BAM2BAM_IPA) &&
		port_num >= n_bam2bam_ports) {
		pr_err("%s: invalid bam2bam portno#%d\n",
			__func__, port_num);
		return;
	}

	if (!gr) {
		pr_err("%s: grmnet port is null\n", __func__);
		return;
	}
	if (trans == USB_GADGET_XPORT_BAM)
		port = bam_ports[port_num].port;
	else
		port = bam2bam_ports[port_num];

	d = &port->data_ch;
	port->gr = gr;

	if (trans == USB_GADGET_XPORT_BAM)
		gbam_free_buffers(port);

	spin_lock_irqsave(&port->port_lock_ul, flags);
	spin_lock(&port->port_lock_dl);
	port->port_usb = 0;
	n_tx_req_queued = 0;
	spin_unlock(&port->port_lock_dl);
	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	/* disable endpoints */
	usb_ep_disable(gr->out);
	usb_ep_disable(gr->in);

	gr->in->driver_data = NULL;
	gr->out->driver_data = NULL;

	if (trans == USB_GADGET_XPORT_BAM ||
		trans == USB_GADGET_XPORT_BAM2BAM_IPA)
		queue_work(gbam_wq, &port->disconnect_w);
	else if (trans == USB_GADGET_XPORT_BAM2BAM) {
		if (port_num == 0) {
			if (usb_bam_client_ready(false)) {
				pr_err("%s: usb_bam_client_ready failed\n",
					__func__);
			}
		}
	}
}

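/*
 * Called when the data interface is brought up: enable the endpoints,
 * record the grmnet handle and the transport-specific connection
 * parameters, then kick the connect worker that performs the actual
 * BAM-DMUX/BAM2BAM/IPA plumbing.
 */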
int gbam_connect(struct grmnet *gr, u8 port_num,
		enum transport_type trans, u8 connection_idx)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	int			ret;
	unsigned long		flags;

	pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);

	if (trans == USB_GADGET_XPORT_BAM && port_num >= n_bam_ports) {
		pr_err("%s: invalid portno#%d\n", __func__, port_num);
		return -ENODEV;
	}

	if ((trans == USB_GADGET_XPORT_BAM2BAM ||
		trans == USB_GADGET_XPORT_BAM2BAM_IPA)
		&& port_num >= n_bam2bam_ports) {
		pr_err("%s: invalid portno#%d\n", __func__, port_num);
		return -ENODEV;
	}

	if (!gr) {
		pr_err("%s: grmnet port is null\n", __func__);
		return -ENODEV;
	}

	if (trans == USB_GADGET_XPORT_BAM)
		port = bam_ports[port_num].port;
	else
		port = bam2bam_ports[port_num];

	d = &port->data_ch;

	ret = usb_ep_enable(gr->in);
	if (ret) {
		pr_err("%s: usb_ep_enable failed eptype:IN ep:%p",
			__func__, gr->in);
		return ret;
	}
	gr->in->driver_data = port;

	ret = usb_ep_enable(gr->out);
	if (ret) {
		pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p",
			__func__, gr->out);
		gr->in->driver_data = 0;
		return ret;
	}
	gr->out->driver_data = port;

	spin_lock_irqsave(&port->port_lock_ul, flags);
	spin_lock(&port->port_lock_dl);
	port->port_usb = gr;

	if (trans == USB_GADGET_XPORT_BAM) {
		d->to_host = 0;
		d->to_modem = 0;
		d->pending_with_bam = 0;
		d->tohost_drp_cnt = 0;
		d->tomodem_drp_cnt = 0;
	}

	spin_unlock(&port->port_lock_dl);
	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	if (trans == USB_GADGET_XPORT_BAM2BAM) {
		port->gr = gr;
		d->connection_idx = connection_idx;
	} else if (trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
		port->gr = gr;
		d->ipa_params.src_pipe = &(d->src_pipe_idx);
		d->ipa_params.dst_pipe = &(d->dst_pipe_idx);
		d->ipa_params.idx = connection_idx;
	}

	d->trans = trans;
	queue_work(gbam_wq, &port->connect_w);

	return 0;
}

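/*
 * One-time setup: allocate the gbam workqueue and per-port state for the
 * requested number of BAM and BAM2BAM ports, register the per-channel
 * platform drivers, and create the debugfs entries.
 */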
int gbam_setup(unsigned int no_bam_port, unsigned int no_bam2bam_port)
{
	int	i;
	int	ret;

	pr_debug("%s: requested BAM ports:%d and BAM2BAM ports:%d\n",
			__func__, no_bam_port, no_bam2bam_port);

	if ((!no_bam_port && !no_bam2bam_port) || no_bam_port > BAM_N_PORTS
		|| no_bam2bam_port > BAM2BAM_N_PORTS) {
		pr_err("%s: Invalid num of ports count:%d,%d\n",
			__func__, no_bam_port, no_bam2bam_port);
		return -EINVAL;
	}

	gbam_wq = alloc_workqueue("k_gbam", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!gbam_wq) {
		pr_err("%s: Unable to create workqueue gbam_wq\n",
				__func__);
		return -ENOMEM;
	}

	for (i = 0; i < no_bam_port; i++) {
		n_bam_ports++;
		ret = gbam_port_alloc(i);
		if (ret) {
			n_bam_ports--;
			pr_err("%s: Unable to alloc port:%d\n", __func__, i);
			goto free_bam_ports;
		}
	}

	for (i = 0; i < no_bam2bam_port; i++) {
		n_bam2bam_ports++;
		ret = gbam2bam_port_alloc(i);
		if (ret) {
			n_bam2bam_ports--;
			pr_err("%s: Unable to alloc port:%d\n", __func__, i);
			goto free_bam_ports;
		}
	}

	gbam_debugfs_init();
	return 0;

free_bam_ports:
	for (i = 0; i < n_bam_ports; i++)
		gbam_port_free(i);
	for (i = 0; i < n_bam2bam_ports; i++)
		gbam2bam_port_free(i);
	destroy_workqueue(gbam_wq);

	return ret;
}

static int gbam_wake_cb(void *param)
{
	struct gbam_port *port = (struct gbam_port *)param;
	struct bam_ch_info *d;
	struct f_rmnet *dev;

	dev = port_to_rmnet(port->gr);
	d = &port->data_ch;

	pr_debug("%s: woken up by peer\n", __func__);

	return usb_gadget_wakeup(dev->cdev->gadget);
}

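/*
 * Bus suspend/resume for BAM2BAM transports: on suspend, register a wake
 * callback so that peer activity triggers USB remote wakeup; on resume the
 * callback is removed.
 */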
void gbam_suspend(struct grmnet *gr, u8 port_num, enum transport_type trans)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;

	if (trans != USB_GADGET_XPORT_BAM2BAM &&
		trans != USB_GADGET_XPORT_BAM2BAM_IPA)
		return;

	port = bam2bam_ports[port_num];
	d = &port->data_ch;

	pr_debug("%s: suspended port %d\n", __func__, port_num);

	usb_bam_register_wake_cb(d->connection_idx, gbam_wake_cb, port);
}

void gbam_resume(struct grmnet *gr, u8 port_num, enum transport_type trans)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;

	if (trans != USB_GADGET_XPORT_BAM2BAM &&
		trans != USB_GADGET_XPORT_BAM2BAM_IPA)
		return;

	port = bam2bam_ports[port_num];
	d = &port->data_ch;

	pr_debug("%s: resumed port %d\n", __func__, port_num);

	usb_bam_register_wake_cb(d->connection_idx, NULL, NULL);
}