blob: a2997e9bc1fd7ab581c8f240a7838076b9d2bf46 [file] [log] [blame]
Chiranjeevi Velempatia06b2232013-01-04 10:10:52 +05301/* Copyright (c) 2011-2013, Linux Foundation. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/kernel.h>
14#include <linux/interrupt.h>
15#include <linux/device.h>
16#include <linux/delay.h>
17#include <linux/slab.h>
18#include <linux/termios.h>
19#include <mach/msm_smd.h>
20#include <linux/netdevice.h>
21#include <mach/bam_dmux.h>
22#include <linux/debugfs.h>
23#include <linux/bitops.h>
24#include <linux/termios.h>
25
Ofir Cohena1c2a872011-12-14 10:26:34 +020026#include <mach/usb_gadget_xport.h>
Shimrit Malichi194fe122012-07-25 13:50:41 +030027#include <linux/usb/msm_hsusb.h>
Ofir Cohena1c2a872011-12-14 10:26:34 +020028#include <mach/usb_bam.h>
29
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070030#include "u_rmnet.h"
31
32#define BAM_N_PORTS 1
Anna Perel21515162012-02-02 20:50:02 +020033#define BAM2BAM_N_PORTS 3
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070034
35static struct workqueue_struct *gbam_wq;
36static int n_bam_ports;
Ofir Cohena1c2a872011-12-14 10:26:34 +020037static int n_bam2bam_ports;
Vijayavardhan Vennapusa08c31252011-12-21 13:02:49 +053038static unsigned n_tx_req_queued;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070039static unsigned bam_ch_ids[] = { 8 };
40
Jack Phameffd4ae2011-08-03 16:49:36 -070041static const char *bam_ch_names[] = { "bam_dmux_ch_8" };
42
Vamsi Krishna84579552011-11-09 15:33:22 -080043#define BAM_PENDING_LIMIT 220
Vamsi Krishna8f24f252011-11-02 11:46:08 -070044#define BAM_MUX_TX_PKT_DROP_THRESHOLD 1000
Vamsi Krishna84579552011-11-09 15:33:22 -080045#define BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD 500
46#define BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD 300
Vamsi Krishna8f24f252011-11-02 11:46:08 -070047#define BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT 1
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070048
49#define BAM_MUX_HDR 8
50
Vamsi Krishna8f24f252011-11-02 11:46:08 -070051#define BAM_MUX_RX_Q_SIZE 16
52#define BAM_MUX_TX_Q_SIZE 200
Manu Gautam15203302012-09-26 11:12:54 +053053#define BAM_MUX_RX_REQ_SIZE 2048 /* Must be 1KB aligned */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070054
Vijayavardhan Vennapusa08c31252011-12-21 13:02:49 +053055#define DL_INTR_THRESHOLD 20
56
Vamsi Krishna8f24f252011-11-02 11:46:08 -070057unsigned int bam_mux_tx_pkt_drop_thld = BAM_MUX_TX_PKT_DROP_THRESHOLD;
58module_param(bam_mux_tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070059
Vamsi Krishna8f24f252011-11-02 11:46:08 -070060unsigned int bam_mux_rx_fctrl_en_thld = BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD;
61module_param(bam_mux_rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070062
Vamsi Krishna8f24f252011-11-02 11:46:08 -070063unsigned int bam_mux_rx_fctrl_support = BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT;
64module_param(bam_mux_rx_fctrl_support, uint, S_IRUGO | S_IWUSR);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070065
Vamsi Krishna8f24f252011-11-02 11:46:08 -070066unsigned int bam_mux_rx_fctrl_dis_thld = BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD;
67module_param(bam_mux_rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070068
Vamsi Krishna8f24f252011-11-02 11:46:08 -070069unsigned int bam_mux_tx_q_size = BAM_MUX_TX_Q_SIZE;
70module_param(bam_mux_tx_q_size, uint, S_IRUGO | S_IWUSR);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070071
Vamsi Krishna8f24f252011-11-02 11:46:08 -070072unsigned int bam_mux_rx_q_size = BAM_MUX_RX_Q_SIZE;
73module_param(bam_mux_rx_q_size, uint, S_IRUGO | S_IWUSR);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070074
Vamsi Krishna8f24f252011-11-02 11:46:08 -070075unsigned int bam_mux_rx_req_size = BAM_MUX_RX_REQ_SIZE;
76module_param(bam_mux_rx_req_size, uint, S_IRUGO | S_IWUSR);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070077
Vijayavardhan Vennapusa08c31252011-12-21 13:02:49 +053078unsigned int dl_intr_threshold = DL_INTR_THRESHOLD;
79module_param(dl_intr_threshold, uint, S_IRUGO | S_IWUSR);
80
Jack Phameffd4ae2011-08-03 16:49:36 -070081#define BAM_CH_OPENED BIT(0)
82#define BAM_CH_READY BIT(1)
Ofir Cohena1c2a872011-12-14 10:26:34 +020083
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070084struct bam_ch_info {
Jack Phameffd4ae2011-08-03 16:49:36 -070085 unsigned long flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070086 unsigned id;
87
88 struct list_head tx_idle;
89 struct sk_buff_head tx_skb_q;
90
91 struct list_head rx_idle;
92 struct sk_buff_head rx_skb_q;
93
94 struct gbam_port *port;
95 struct work_struct write_tobam_w;
Vijayavardhan Vennapusa929e5792011-12-12 17:34:53 +053096 struct work_struct write_tohost_w;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070097
Ofir Cohena1c2a872011-12-14 10:26:34 +020098 struct usb_request *rx_req;
99 struct usb_request *tx_req;
100
Shimrit Malichi255b5342012-08-02 21:01:43 +0300101 u32 src_pipe_idx;
102 u32 dst_pipe_idx;
Ofir Cohena1c2a872011-12-14 10:26:34 +0200103 u8 connection_idx;
Ofir Cohenfdecb602012-11-16 15:50:01 +0200104 enum transport_type trans;
Ofir Cohen77848d62012-12-05 13:16:10 +0200105 struct usb_bam_connect_ipa_params ipa_params;
Ofir Cohena1c2a872011-12-14 10:26:34 +0200106
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700107 /* stats */
108 unsigned int pending_with_bam;
109 unsigned int tohost_drp_cnt;
110 unsigned int tomodem_drp_cnt;
111 unsigned int tx_len;
112 unsigned int rx_len;
113 unsigned long to_modem;
114 unsigned long to_host;
115};
116
117struct gbam_port {
118 unsigned port_num;
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530119 spinlock_t port_lock_ul;
120 spinlock_t port_lock_dl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700121
122 struct grmnet *port_usb;
Ofir Cohena1c2a872011-12-14 10:26:34 +0200123 struct grmnet *gr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700124
125 struct bam_ch_info data_ch;
126
127 struct work_struct connect_w;
Vamsi Krishna1ad076d2011-11-10 15:03:30 -0800128 struct work_struct disconnect_w;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700129};
130
131static struct bam_portmaster {
132 struct gbam_port *port;
Jack Phameffd4ae2011-08-03 16:49:36 -0700133 struct platform_driver pdrv;
134} bam_ports[BAM_N_PORTS];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700135
Ofir Cohena1c2a872011-12-14 10:26:34 +0200136struct gbam_port *bam2bam_ports[BAM2BAM_N_PORTS];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700137static void gbam_start_rx(struct gbam_port *port);
Ofir Cohena1c2a872011-12-14 10:26:34 +0200138static void gbam_start_endless_rx(struct gbam_port *port);
139static void gbam_start_endless_tx(struct gbam_port *port);
Amit Blay94525352012-12-24 11:23:27 +0200140static int gbam_peer_reset_cb(void *param);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700141
142/*---------------misc functions---------------- */
143static void gbam_free_requests(struct usb_ep *ep, struct list_head *head)
144{
145 struct usb_request *req;
146
147 while (!list_empty(head)) {
148 req = list_entry(head->next, struct usb_request, list);
149 list_del(&req->list);
150 usb_ep_free_request(ep, req);
151 }
152}
153
154static int gbam_alloc_requests(struct usb_ep *ep, struct list_head *head,
155 int num,
156 void (*cb)(struct usb_ep *ep, struct usb_request *),
157 gfp_t flags)
158{
159 int i;
160 struct usb_request *req;
161
162 pr_debug("%s: ep:%p head:%p num:%d cb:%p", __func__,
163 ep, head, num, cb);
164
165 for (i = 0; i < num; i++) {
166 req = usb_ep_alloc_request(ep, flags);
167 if (!req) {
168 pr_debug("%s: req allocated:%d\n", __func__, i);
169 return list_empty(head) ? -ENOMEM : 0;
170 }
171 req->complete = cb;
172 list_add(&req->list, head);
173 }
174
175 return 0;
176}
177/*--------------------------------------------- */
178
179/*------------data_path----------------------------*/
/*
 * Drain the DL (modem -> host) skb queue: pair each queued skb with an
 * idle IN usb_request and submit it to the IN endpoint.
 * Takes port_lock_dl; safe to call from any context (irqsave).
 */
static void gbam_write_data_tohost(struct gbam_port *port)
{
	unsigned long flags;
	struct bam_ch_info *d = &port->data_ch;
	struct sk_buff *skb;
	int ret;
	struct usb_request *req;
	struct usb_ep *ep;

	spin_lock_irqsave(&port->port_lock_dl, flags);
	/* port_usb is cleared on disconnect; nothing to send then */
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock_dl, flags);
		return;
	}

	ep = port->port_usb->in;

	while (!list_empty(&d->tx_idle)) {
		skb = __skb_dequeue(&d->tx_skb_q);
		if (!skb) {
			/* no more data pending */
			spin_unlock_irqrestore(&port->port_lock_dl, flags);
			return;
		}
		req = list_first_entry(&d->tx_idle,
				struct usb_request,
				list);
		req->context = skb;
		req->buf = skb->data;
		req->length = skb->len;
		/*
		 * Interrupt moderation: only every dl_intr_threshold-th
		 * request asks for a completion interrupt, cutting IRQ load
		 * on the DL path.
		 */
		n_tx_req_queued++;
		if (n_tx_req_queued == dl_intr_threshold) {
			req->no_interrupt = 0;
			n_tx_req_queued = 0;
		} else {
			req->no_interrupt = 1;
		}

		list_del(&req->list);

		/* drop only the lock (irqs stay disabled) across ep queue */
		spin_unlock(&port->port_lock_dl);
		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
		spin_lock(&port->port_lock_dl);
		if (ret) {
			pr_err("%s: usb epIn failed with %d\n", __func__, ret);
			/* return req to the idle pool, drop the packet */
			list_add(&req->list, &d->tx_idle);
			dev_kfree_skb_any(skb);
			break;
		}
		d->to_host++;
	}
	spin_unlock_irqrestore(&port->port_lock_dl, flags);
}
232
Chiranjeevi Velempatie5105922012-01-19 12:25:26 +0530233static void gbam_write_data_tohost_w(struct work_struct *w)
234{
235 struct bam_ch_info *d;
236 struct gbam_port *port;
237
238 d = container_of(w, struct bam_ch_info, write_tohost_w);
239 port = d->port;
240
241 gbam_write_data_tohost(port);
242}
243
/*
 * BAM DMUX receive callback (DL path): a packet arrived from the modem.
 * Queue it towards the host, dropping it when the cable is gone or the
 * DL backlog exceeds the bam_mux_tx_pkt_drop_thld watermark.
 */
void gbam_data_recv_cb(void *p, struct sk_buff *skb)
{
	struct gbam_port *port = p;
	struct bam_ch_info *d = &port->data_ch;
	unsigned long flags;

	if (!skb)
		return;

	pr_debug("%s: p:%p#%d d:%p skb_len:%d\n", __func__,
			port, port->port_num, d, skb->len);

	spin_lock_irqsave(&port->port_lock_dl, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock_dl, flags);
		dev_kfree_skb_any(skb);
		return;
	}

	/* host is not draining fast enough -- drop and count it */
	if (d->tx_skb_q.qlen > bam_mux_tx_pkt_drop_thld) {
		d->tohost_drp_cnt++;
		if (printk_ratelimit())
			pr_err("%s: tx pkt dropped: tx_drop_cnt:%u\n",
					__func__, d->tohost_drp_cnt);
		spin_unlock_irqrestore(&port->port_lock_dl, flags);
		dev_kfree_skb_any(skb);
		return;
	}

	__skb_queue_tail(&d->tx_skb_q, skb);
	spin_unlock_irqrestore(&port->port_lock_dl, flags);

	/* kick the DL writer outside the lock */
	gbam_write_data_tohost(port);
}
278
279void gbam_data_write_done(void *p, struct sk_buff *skb)
280{
281 struct gbam_port *port = p;
282 struct bam_ch_info *d = &port->data_ch;
283 unsigned long flags;
284
285 if (!skb)
286 return;
287
288 dev_kfree_skb_any(skb);
289
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530290 spin_lock_irqsave(&port->port_lock_ul, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700291
292 d->pending_with_bam--;
293
294 pr_debug("%s: port:%p d:%p tom:%lu pbam:%u, pno:%d\n", __func__,
295 port, d, d->to_modem,
296 d->pending_with_bam, port->port_num);
297
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530298 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700299
Vamsi Krishna84579552011-11-09 15:33:22 -0800300 queue_work(gbam_wq, &d->write_tobam_w);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700301}
302
303static void gbam_data_write_tobam(struct work_struct *w)
304{
305 struct gbam_port *port;
306 struct bam_ch_info *d;
307 struct sk_buff *skb;
308 unsigned long flags;
309 int ret;
Vamsi Krishna84579552011-11-09 15:33:22 -0800310 int qlen;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700311
312 d = container_of(w, struct bam_ch_info, write_tobam_w);
313 port = d->port;
314
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530315 spin_lock_irqsave(&port->port_lock_ul, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700316 if (!port->port_usb) {
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530317 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700318 return;
319 }
320
Vamsi Krishna84579552011-11-09 15:33:22 -0800321 while (d->pending_with_bam < BAM_PENDING_LIMIT) {
Vamsi Krishna2327c79152011-11-08 16:12:42 -0800322 skb = __skb_dequeue(&d->rx_skb_q);
Vamsi Krishna625c28e2011-12-16 22:34:49 -0800323 if (!skb)
324 break;
325
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700326 d->pending_with_bam++;
327 d->to_modem++;
328
329 pr_debug("%s: port:%p d:%p tom:%lu pbam:%u pno:%d\n", __func__,
330 port, d, d->to_modem, d->pending_with_bam,
331 port->port_num);
332
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530333 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700334 ret = msm_bam_dmux_write(d->id, skb);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530335 spin_lock_irqsave(&port->port_lock_ul, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700336 if (ret) {
337 pr_debug("%s: write error:%d\n", __func__, ret);
338 d->pending_with_bam--;
339 d->to_modem--;
340 d->tomodem_drp_cnt++;
341 dev_kfree_skb_any(skb);
342 break;
343 }
344 }
Vamsi Krishna84579552011-11-09 15:33:22 -0800345
346 qlen = d->rx_skb_q.qlen;
347
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530348 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Vamsi Krishna84579552011-11-09 15:33:22 -0800349
350 if (qlen < BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD)
351 gbam_start_rx(port);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700352}
353/*-------------------------------------------------------------*/
354
/*
 * IN (device -> host) endpoint completion: the skb attached to @req has
 * been transmitted (or cancelled). Free the skb, return the request to
 * the tx_idle pool and reschedule the DL writer.
 */
static void gbam_epin_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gbam_port *port = ep->driver_data;
	struct bam_ch_info *d;
	struct sk_buff *skb = req->context;
	int status = req->status;

	switch (status) {
	case 0:
		/* successful completion */
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		break;
	default:
		pr_err("%s: data tx ep error %d\n",
				__func__, status);
		break;
	}

	dev_kfree_skb_any(skb);

	/*
	 * NOTE(review): when port is NULL the request is neither freed nor
	 * recycled here -- presumably disconnect tears the pool down;
	 * confirm against the disconnect path.
	 */
	if (!port)
		return;

	spin_lock(&port->port_lock_dl);
	d = &port->data_ch;
	list_add_tail(&req->list, &d->tx_idle);
	spin_unlock(&port->port_lock_dl);

	queue_work(gbam_wq, &d->write_tohost_w);
}
387
/*
 * OUT (host -> device) endpoint completion: data from the host is in the
 * skb attached to @req. Hand the skb to the UL worker, then either park
 * the request on rx_idle (flow control engaged) or re-arm it with a
 * freshly allocated skb. Runs in the UDC completion context.
 */
static void
gbam_epout_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gbam_port *port = ep->driver_data;
	struct bam_ch_info *d = &port->data_ch;
	struct sk_buff *skb = req->context;
	int status = req->status;
	int queue = 0;

	switch (status) {
	case 0:
		skb_put(skb, req->actual);
		queue = 1;
		break;
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* cable disconnection: release both skb and request */
		dev_kfree_skb_any(skb);
		req->buf = 0;
		usb_ep_free_request(ep, req);
		return;
	default:
		if (printk_ratelimit())
			pr_err("%s: %s response error %d, %d/%d\n",
				__func__, ep->name, status,
				req->actual, req->length);
		dev_kfree_skb_any(skb);
		break;
	}

	spin_lock(&port->port_lock_ul);
	if (queue) {
		__skb_queue_tail(&d->rx_skb_q, skb);
		queue_work(gbam_wq, &d->write_tobam_w);
	}

	/* TODO: Handle flow control gracefully by having
	 * having call back mechanism from bam driver
	 */
	/* backlog too deep: park the request instead of re-arming it */
	if (bam_mux_rx_fctrl_support &&
		d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld) {

		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock_ul);
		return;
	}
	spin_unlock(&port->port_lock_ul);

	/* re-arm: attach a new skb and resubmit the same request */
	skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
	if (!skb) {
		spin_lock(&port->port_lock_ul);
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock_ul);
		return;
	}
	skb_reserve(skb, BAM_MUX_HDR);

	req->buf = skb->data;
	req->length = bam_mux_rx_req_size;
	req->context = skb;

	status = usb_ep_queue(ep, req, GFP_ATOMIC);
	if (status) {
		dev_kfree_skb_any(skb);

		if (printk_ratelimit())
			pr_err("%s: data rx enqueue err %d\n",
					__func__, status);

		spin_lock(&port->port_lock_ul);
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock_ul);
	}
}
462
Ofir Cohena1c2a872011-12-14 10:26:34 +0200463static void gbam_endless_rx_complete(struct usb_ep *ep, struct usb_request *req)
464{
465 int status = req->status;
466
467 pr_debug("%s status: %d\n", __func__, status);
468}
469
470static void gbam_endless_tx_complete(struct usb_ep *ep, struct usb_request *req)
471{
472 int status = req->status;
473
474 pr_debug("%s status: %d\n", __func__, status);
475}
476
/*
 * Arm the OUT endpoint: attach a freshly allocated skb to every idle RX
 * request and submit it, stopping early when flow control engages
 * (rx_skb_q backlog >= bam_mux_rx_fctrl_en_thld) or skb allocation
 * fails. Takes port_lock_ul, dropping it around usb_ep_queue().
 */
static void gbam_start_rx(struct gbam_port *port)
{
	struct usb_request *req;
	struct bam_ch_info *d;
	struct usb_ep *ep;
	unsigned long flags;
	int ret;
	struct sk_buff *skb;

	spin_lock_irqsave(&port->port_lock_ul, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}

	d = &port->data_ch;
	ep = port->port_usb->out;

	/* port_usb re-checked: the lock is dropped inside the loop */
	while (port->port_usb && !list_empty(&d->rx_idle)) {

		if (bam_mux_rx_fctrl_support &&
			d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld)
			break;

		req = list_first_entry(&d->rx_idle, struct usb_request, list);

		skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
		if (!skb)
			break;
		/* leave headroom for the mux header prepended later */
		skb_reserve(skb, BAM_MUX_HDR);

		list_del(&req->list);
		req->buf = skb->data;
		req->length = bam_mux_rx_req_size;
		req->context = skb;

		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
		spin_lock_irqsave(&port->port_lock_ul, flags);
		if (ret) {
			dev_kfree_skb_any(skb);

			if (printk_ratelimit())
				pr_err("%s: rx queue failed %d\n",
						__func__, ret);

			/* port may have disconnected while unlocked */
			if (port->port_usb)
				list_add(&req->list, &d->rx_idle);
			else
				usb_ep_free_request(ep, req);
			break;
		}
	}
	spin_unlock_irqrestore(&port->port_lock_ul, flags);
}
532
Ofir Cohena1c2a872011-12-14 10:26:34 +0200533static void gbam_start_endless_rx(struct gbam_port *port)
534{
535 struct bam_ch_info *d = &port->data_ch;
536 int status;
537
Amit Blay94525352012-12-24 11:23:27 +0200538 if (!port->port_usb)
539 return;
540
Ofir Cohena1c2a872011-12-14 10:26:34 +0200541 status = usb_ep_queue(port->port_usb->out, d->rx_req, GFP_ATOMIC);
542 if (status)
543 pr_err("%s: error enqueuing transfer, %d\n", __func__, status);
544}
545
546static void gbam_start_endless_tx(struct gbam_port *port)
547{
548 struct bam_ch_info *d = &port->data_ch;
549 int status;
550
Amit Blay94525352012-12-24 11:23:27 +0200551 if (!port->port_usb)
552 return;
553
Ofir Cohena1c2a872011-12-14 10:26:34 +0200554 status = usb_ep_queue(port->port_usb->in, d->tx_req, GFP_ATOMIC);
555 if (status)
556 pr_err("%s: error enqueuing transfer, %d\n", __func__, status);
557}
558
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700559static void gbam_start_io(struct gbam_port *port)
560{
561 unsigned long flags;
562 struct usb_ep *ep;
563 int ret;
564 struct bam_ch_info *d;
565
566 pr_debug("%s: port:%p\n", __func__, port);
567
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530568 spin_lock_irqsave(&port->port_lock_ul, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700569 if (!port->port_usb) {
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530570 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700571 return;
572 }
573
574 d = &port->data_ch;
575 ep = port->port_usb->out;
Vamsi Krishna8f24f252011-11-02 11:46:08 -0700576 ret = gbam_alloc_requests(ep, &d->rx_idle, bam_mux_rx_q_size,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700577 gbam_epout_complete, GFP_ATOMIC);
578 if (ret) {
579 pr_err("%s: rx req allocation failed\n", __func__);
580 return;
581 }
582
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530583 spin_unlock_irqrestore(&port->port_lock_ul, flags);
584 spin_lock_irqsave(&port->port_lock_dl, flags);
Chiranjeevi Velempati44d02982013-02-17 22:09:08 +0530585 if (!port->port_usb) {
586 gbam_free_requests(ep, &d->rx_idle);
587 spin_unlock_irqrestore(&port->port_lock_dl, flags);
588 return;
589 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700590 ep = port->port_usb->in;
Vamsi Krishna8f24f252011-11-02 11:46:08 -0700591 ret = gbam_alloc_requests(ep, &d->tx_idle, bam_mux_tx_q_size,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700592 gbam_epin_complete, GFP_ATOMIC);
593 if (ret) {
594 pr_err("%s: tx req allocation failed\n", __func__);
595 gbam_free_requests(ep, &d->rx_idle);
596 return;
597 }
598
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530599 spin_unlock_irqrestore(&port->port_lock_dl, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700600
601 /* queue out requests */
602 gbam_start_rx(port);
603}
604
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600605static void gbam_notify(void *p, int event, unsigned long data)
606{
607 switch (event) {
608 case BAM_DMUX_RECEIVE:
609 gbam_data_recv_cb(p, (struct sk_buff *)(data));
610 break;
611 case BAM_DMUX_WRITE_DONE:
612 gbam_data_write_done(p, (struct sk_buff *)(data));
613 break;
614 }
615}
616
Ofir Cohena1c2a872011-12-14 10:26:34 +0200617static void gbam_free_buffers(struct gbam_port *port)
618{
619 struct sk_buff *skb;
620 unsigned long flags;
621 struct bam_ch_info *d;
622
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530623 spin_lock_irqsave(&port->port_lock_ul, flags);
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -0800624 spin_lock(&port->port_lock_dl);
Ofir Cohena1c2a872011-12-14 10:26:34 +0200625
626 if (!port || !port->port_usb)
627 goto free_buf_out;
628
629 d = &port->data_ch;
630
631 gbam_free_requests(port->port_usb->in, &d->tx_idle);
632 gbam_free_requests(port->port_usb->out, &d->rx_idle);
633
634 while ((skb = __skb_dequeue(&d->tx_skb_q)))
635 dev_kfree_skb_any(skb);
636
637 while ((skb = __skb_dequeue(&d->rx_skb_q)))
638 dev_kfree_skb_any(skb);
639
640free_buf_out:
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -0800641 spin_unlock(&port->port_lock_dl);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530642 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Ofir Cohena1c2a872011-12-14 10:26:34 +0200643}
644
Vamsi Krishna1ad076d2011-11-10 15:03:30 -0800645static void gbam_disconnect_work(struct work_struct *w)
646{
647 struct gbam_port *port =
648 container_of(w, struct gbam_port, disconnect_w);
649 struct bam_ch_info *d = &port->data_ch;
650
651 if (!test_bit(BAM_CH_OPENED, &d->flags))
652 return;
653
654 msm_bam_dmux_close(d->id);
655 clear_bit(BAM_CH_OPENED, &d->flags);
656}
657
Ofir Cohenfdecb602012-11-16 15:50:01 +0200658static void gbam2bam_disconnect_work(struct work_struct *w)
659{
Shimrit Malichi419fdac2013-01-16 14:35:31 +0200660 struct gbam_port *port =
661 container_of(w, struct gbam_port, disconnect_w);
Ofir Cohenfdecb602012-11-16 15:50:01 +0200662 struct bam_ch_info *d = &port->data_ch;
663 int ret;
664
665 if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
Ofir Cohen77848d62012-12-05 13:16:10 +0200666 ret = usb_bam_disconnect_ipa(d->connection_idx, &d->ipa_params);
Ofir Cohenfdecb602012-11-16 15:50:01 +0200667 if (ret)
668 pr_err("%s: usb_bam_disconnect_ipa failed: err:%d\n",
669 __func__, ret);
670 rmnet_bridge_disconnect();
671 }
672}
673
/*
 * Connect worker for the BAM-DMUX transport: verifies the port is still
 * bound and the platform channel is ready, opens the DMUX channel and
 * starts USB I/O.
 */
static void gbam_connect_work(struct work_struct *w)
{
	struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
	struct bam_ch_info *d = &port->data_ch;
	int ret;
	unsigned long flags;

	/* both locks taken (ul then dl) to check port_usb consistently */
	spin_lock_irqsave(&port->port_lock_ul, flags);
	spin_lock(&port->port_lock_dl);
	if (!port->port_usb) {
		spin_unlock(&port->port_lock_dl);
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}
	spin_unlock(&port->port_lock_dl);
	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	/* BAM_CH_READY is set when the dmux platform device probes */
	if (!test_bit(BAM_CH_READY, &d->flags))
		return;

	ret = msm_bam_dmux_open(d->id, port, gbam_notify);
	if (ret) {
		pr_err("%s: unable open bam ch:%d err:%d\n",
				__func__, d->id, ret);
		return;
	}
	set_bit(BAM_CH_OPENED, &d->flags);

	gbam_start_io(port);

	pr_debug("%s: done\n", __func__);
}
706
/*
 * Connect worker for the BAM2BAM / BAM2BAM-IPA transports: establishes
 * the peripheral-side BAM (or IPA) pipes, allocates the two "endless"
 * usb_requests that keep the hardware data path running, and submits
 * them. For BAM2BAM on port 0 it also registers the peer-reset callback
 * and signals readiness to the bus driver.
 */
static void gbam2bam_connect_work(struct work_struct *w)
{
	struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
	struct bam_ch_info *d = &port->data_ch;
	u32 sps_params;
	int ret;

	if (d->trans == USB_GADGET_XPORT_BAM2BAM) {
		ret = usb_bam_connect(d->connection_idx, &d->src_pipe_idx,
			&d->dst_pipe_idx);
		if (ret) {
			pr_err("%s: usb_bam_connect failed: err:%d\n",
				__func__, ret);
			return;
		}
	} else if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
		/* first leg: peripheral -> USB (consumer) */
		d->ipa_params.client = IPA_CLIENT_USB_CONS;
		d->ipa_params.dir = PEER_PERIPHERAL_TO_USB;
		ret = usb_bam_connect_ipa(&d->ipa_params);
		if (ret) {
			pr_err("%s: usb_bam_connect_ipa failed: err:%d\n",
				__func__, ret);
			return;
		}

		/* second leg: USB -> peripheral (producer) */
		d->ipa_params.client = IPA_CLIENT_USB_PROD;
		d->ipa_params.dir = USB_TO_PEER_PERIPHERAL;
		/* Currently only DMA mode is supported */
		d->ipa_params.ipa_ep_cfg.mode.mode = IPA_DMA;
		d->ipa_params.ipa_ep_cfg.mode.dst =
			IPA_CLIENT_A2_TETHERED_CONS;
		ret = usb_bam_connect_ipa(&d->ipa_params);
		if (ret) {
			pr_err("%s: usb_bam_connect_ipa failed: err:%d\n",
				__func__, ret);
			return;
		}
		rmnet_bridge_connect(d->ipa_params.prod_clnt_hdl,
			d->ipa_params.cons_clnt_hdl, 0);
	}

	/*
	 * NOTE(review): if tx_req allocation below fails, rx_req is
	 * neither freed nor unqueued here -- presumably torn down on
	 * disconnect; confirm against the disconnect path.
	 */
	d->rx_req = usb_ep_alloc_request(port->port_usb->out, GFP_KERNEL);
	if (!d->rx_req)
		return;

	d->rx_req->context = port;
	d->rx_req->complete = gbam_endless_rx_complete;
	d->rx_req->length = 0;
	/* udc_priv flags tell the MSM UDC to run this ep in SPS mode */
	sps_params = (MSM_SPS_MODE | d->src_pipe_idx |
				MSM_VENDOR_ID) & ~MSM_IS_FINITE_TRANSFER;
	d->rx_req->udc_priv = sps_params;
	d->tx_req = usb_ep_alloc_request(port->port_usb->in, GFP_KERNEL);
	if (!d->tx_req)
		return;

	d->tx_req->context = port;
	d->tx_req->complete = gbam_endless_tx_complete;
	d->tx_req->length = 0;
	sps_params = (MSM_SPS_MODE | d->dst_pipe_idx |
				MSM_VENDOR_ID) & ~MSM_IS_FINITE_TRANSFER;
	d->tx_req->udc_priv = sps_params;

	/* queue in & out requests */
	gbam_start_endless_rx(port);
	gbam_start_endless_tx(port);

	if (d->trans == USB_GADGET_XPORT_BAM2BAM && port->port_num == 0) {
		/* Register for peer reset callback */
		usb_bam_register_peer_reset_cb(d->connection_idx,
			gbam_peer_reset_cb, port);

		ret = usb_bam_client_ready(true);
		if (ret) {
			pr_err("%s: usb_bam_client_ready failed: err:%d\n",
				__func__, ret);
			return;
		}
	}

	pr_debug("%s: done\n", __func__);
}
788
Amit Blay94525352012-12-24 11:23:27 +0200789static int gbam_peer_reset_cb(void *param)
790{
791 struct gbam_port *port = (struct gbam_port *)param;
792 struct bam_ch_info *d;
793 struct f_rmnet *dev;
794 struct usb_gadget *gadget;
795 int ret;
796 bool reenable_eps = false;
797
798 dev = port_to_rmnet(port->gr);
799 d = &port->data_ch;
800
801 gadget = dev->cdev->gadget;
802
803 pr_debug("%s: reset by peer\n", __func__);
804
805 /* Disable the relevant EPs if currently EPs are enabled */
806 if (port->port_usb && port->port_usb->in &&
807 port->port_usb->in->driver_data) {
808 usb_ep_disable(port->port_usb->out);
809 usb_ep_disable(port->port_usb->in);
810
811 port->port_usb->in->driver_data = NULL;
812 port->port_usb->out->driver_data = NULL;
813 reenable_eps = true;
814 }
815
816 /* Disable BAM */
817 msm_hw_bam_disable(1);
818
819 /* Reset BAM */
820 ret = usb_bam_reset();
821 if (ret) {
822 pr_err("%s: BAM reset failed %d\n", __func__, ret);
823 goto reenable_eps;
824 }
825
826 /* Enable BAM */
827 msm_hw_bam_disable(0);
828
829reenable_eps:
830 /* Re-Enable the relevant EPs, if EPs were originally enabled */
831 if (reenable_eps) {
832 ret = usb_ep_enable(port->port_usb->in);
833 if (ret) {
834 pr_err("%s: usb_ep_enable failed eptype:IN ep:%p",
835 __func__, port->port_usb->in);
836 return ret;
837 }
838 port->port_usb->in->driver_data = port;
839
840 ret = usb_ep_enable(port->port_usb->out);
841 if (ret) {
842 pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p",
843 __func__, port->port_usb->out);
844 port->port_usb->in->driver_data = 0;
845 return ret;
846 }
847 port->port_usb->out->driver_data = port;
848
849 gbam_start_endless_rx(port);
850 gbam_start_endless_tx(port);
851 }
852
853 /* Unregister the peer reset callback */
854 if (d->trans == USB_GADGET_XPORT_BAM2BAM && port->port_num == 0)
855 usb_bam_register_peer_reset_cb(d->connection_idx, NULL, NULL);
856
857 return 0;
858}
859
Jack Phameffd4ae2011-08-03 16:49:36 -0700860/* BAM data channel ready, allow attempt to open */
861static int gbam_data_ch_probe(struct platform_device *pdev)
862{
863 struct gbam_port *port;
864 struct bam_ch_info *d;
865 int i;
866 unsigned long flags;
867
868 pr_debug("%s: name:%s\n", __func__, pdev->name);
869
870 for (i = 0; i < n_bam_ports; i++) {
871 port = bam_ports[i].port;
872 d = &port->data_ch;
873
874 if (!strncmp(bam_ch_names[i], pdev->name,
875 BAM_DMUX_CH_NAME_MAX_LEN)) {
876 set_bit(BAM_CH_READY, &d->flags);
877
878 /* if usb is online, try opening bam_ch */
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530879 spin_lock_irqsave(&port->port_lock_ul, flags);
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -0800880 spin_lock(&port->port_lock_dl);
Jack Phameffd4ae2011-08-03 16:49:36 -0700881 if (port->port_usb)
882 queue_work(gbam_wq, &port->connect_w);
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -0800883 spin_unlock(&port->port_lock_dl);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530884 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Jack Phameffd4ae2011-08-03 16:49:36 -0700885
886 break;
887 }
888 }
889
890 return 0;
891}
892
893/* BAM data channel went inactive, so close it */
894static int gbam_data_ch_remove(struct platform_device *pdev)
895{
896 struct gbam_port *port;
897 struct bam_ch_info *d;
898 struct usb_ep *ep_in = NULL;
899 struct usb_ep *ep_out = NULL;
900 unsigned long flags;
901 int i;
902
903 pr_debug("%s: name:%s\n", __func__, pdev->name);
904
905 for (i = 0; i < n_bam_ports; i++) {
906 if (!strncmp(bam_ch_names[i], pdev->name,
907 BAM_DMUX_CH_NAME_MAX_LEN)) {
908 port = bam_ports[i].port;
909 d = &port->data_ch;
910
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530911 spin_lock_irqsave(&port->port_lock_ul, flags);
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -0800912 spin_lock(&port->port_lock_dl);
Jack Phameffd4ae2011-08-03 16:49:36 -0700913 if (port->port_usb) {
914 ep_in = port->port_usb->in;
915 ep_out = port->port_usb->out;
916 }
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -0800917 spin_unlock(&port->port_lock_dl);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530918 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Jack Phameffd4ae2011-08-03 16:49:36 -0700919
920 if (ep_in)
921 usb_ep_fifo_flush(ep_in);
922 if (ep_out)
923 usb_ep_fifo_flush(ep_out);
924
925 gbam_free_buffers(port);
926
927 msm_bam_dmux_close(d->id);
928
Vamsi Krishna7658bd12012-01-13 10:32:00 -0800929 /* bam dmux will free all pending skbs */
930 d->pending_with_bam = 0;
931
Jack Phameffd4ae2011-08-03 16:49:36 -0700932 clear_bit(BAM_CH_READY, &d->flags);
933 clear_bit(BAM_CH_OPENED, &d->flags);
934 }
935 }
936
937 return 0;
938}
939
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700940static void gbam_port_free(int portno)
941{
942 struct gbam_port *port = bam_ports[portno].port;
Jack Phameffd4ae2011-08-03 16:49:36 -0700943 struct platform_driver *pdrv = &bam_ports[portno].pdrv;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700944
Jack Phameffd4ae2011-08-03 16:49:36 -0700945 if (port) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700946 kfree(port);
Jack Phameffd4ae2011-08-03 16:49:36 -0700947 platform_driver_unregister(pdrv);
948 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700949}
950
Ofir Cohena1c2a872011-12-14 10:26:34 +0200951static void gbam2bam_port_free(int portno)
952{
953 struct gbam_port *port = bam2bam_ports[portno];
954
955 kfree(port);
956}
957
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700958static int gbam_port_alloc(int portno)
959{
960 struct gbam_port *port;
961 struct bam_ch_info *d;
Jack Phameffd4ae2011-08-03 16:49:36 -0700962 struct platform_driver *pdrv;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700963
964 port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
965 if (!port)
966 return -ENOMEM;
967
968 port->port_num = portno;
969
970 /* port initialization */
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530971 spin_lock_init(&port->port_lock_ul);
972 spin_lock_init(&port->port_lock_dl);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700973 INIT_WORK(&port->connect_w, gbam_connect_work);
Vamsi Krishna1ad076d2011-11-10 15:03:30 -0800974 INIT_WORK(&port->disconnect_w, gbam_disconnect_work);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700975
976 /* data ch */
977 d = &port->data_ch;
978 d->port = port;
979 INIT_LIST_HEAD(&d->tx_idle);
980 INIT_LIST_HEAD(&d->rx_idle);
981 INIT_WORK(&d->write_tobam_w, gbam_data_write_tobam);
Chiranjeevi Velempatie5105922012-01-19 12:25:26 +0530982 INIT_WORK(&d->write_tohost_w, gbam_write_data_tohost_w);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700983 skb_queue_head_init(&d->tx_skb_q);
984 skb_queue_head_init(&d->rx_skb_q);
985 d->id = bam_ch_ids[portno];
986
987 bam_ports[portno].port = port;
988
Jack Phameffd4ae2011-08-03 16:49:36 -0700989 pdrv = &bam_ports[portno].pdrv;
990 pdrv->probe = gbam_data_ch_probe;
991 pdrv->remove = gbam_data_ch_remove;
992 pdrv->driver.name = bam_ch_names[portno];
993 pdrv->driver.owner = THIS_MODULE;
994
995 platform_driver_register(pdrv);
Ofir Cohena1c2a872011-12-14 10:26:34 +0200996 pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);
997
998 return 0;
999}
1000
1001static int gbam2bam_port_alloc(int portno)
1002{
1003 struct gbam_port *port;
1004 struct bam_ch_info *d;
1005
1006 port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
1007 if (!port)
1008 return -ENOMEM;
1009
1010 port->port_num = portno;
1011
1012 /* port initialization */
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +05301013 spin_lock_init(&port->port_lock_ul);
1014 spin_lock_init(&port->port_lock_dl);
Ofir Cohena1c2a872011-12-14 10:26:34 +02001015
1016 INIT_WORK(&port->connect_w, gbam2bam_connect_work);
Ofir Cohenfdecb602012-11-16 15:50:01 +02001017 INIT_WORK(&port->disconnect_w, gbam2bam_disconnect_work);
Ofir Cohena1c2a872011-12-14 10:26:34 +02001018
1019 /* data ch */
1020 d = &port->data_ch;
1021 d->port = port;
1022 bam2bam_ports[portno] = port;
Jack Phameffd4ae2011-08-03 16:49:36 -07001023
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001024 pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);
1025
1026 return 0;
1027}
1028
1029#if defined(CONFIG_DEBUG_FS)
1030#define DEBUG_BUF_SIZE 1024
1031static ssize_t gbam_read_stats(struct file *file, char __user *ubuf,
1032 size_t count, loff_t *ppos)
1033{
1034 struct gbam_port *port;
1035 struct bam_ch_info *d;
1036 char *buf;
1037 unsigned long flags;
1038 int ret;
1039 int i;
1040 int temp = 0;
1041
1042 buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
1043 if (!buf)
1044 return -ENOMEM;
1045
1046 for (i = 0; i < n_bam_ports; i++) {
1047 port = bam_ports[i].port;
1048 if (!port)
1049 continue;
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +05301050 spin_lock_irqsave(&port->port_lock_ul, flags);
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -08001051 spin_lock(&port->port_lock_dl);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001052
1053 d = &port->data_ch;
1054
1055 temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
1056 "#PORT:%d port:%p data_ch:%p#\n"
1057 "dpkts_to_usbhost: %lu\n"
1058 "dpkts_to_modem: %lu\n"
1059 "dpkts_pwith_bam: %u\n"
1060 "to_usbhost_dcnt: %u\n"
1061 "tomodem__dcnt: %u\n"
1062 "tx_buf_len: %u\n"
Vamsi Krishna84579552011-11-09 15:33:22 -08001063 "rx_buf_len: %u\n"
Jack Phameffd4ae2011-08-03 16:49:36 -07001064 "data_ch_open: %d\n"
1065 "data_ch_ready: %d\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001066 i, port, &port->data_ch,
1067 d->to_host, d->to_modem,
1068 d->pending_with_bam,
1069 d->tohost_drp_cnt, d->tomodem_drp_cnt,
Vamsi Krishna84579552011-11-09 15:33:22 -08001070 d->tx_skb_q.qlen, d->rx_skb_q.qlen,
Jack Phameffd4ae2011-08-03 16:49:36 -07001071 test_bit(BAM_CH_OPENED, &d->flags),
1072 test_bit(BAM_CH_READY, &d->flags));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001073
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -08001074 spin_unlock(&port->port_lock_dl);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +05301075 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001076 }
1077
1078 ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);
1079
1080 kfree(buf);
1081
1082 return ret;
1083}
1084
1085static ssize_t gbam_reset_stats(struct file *file, const char __user *buf,
1086 size_t count, loff_t *ppos)
1087{
1088 struct gbam_port *port;
1089 struct bam_ch_info *d;
1090 int i;
1091 unsigned long flags;
1092
1093 for (i = 0; i < n_bam_ports; i++) {
1094 port = bam_ports[i].port;
1095 if (!port)
1096 continue;
1097
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +05301098 spin_lock_irqsave(&port->port_lock_ul, flags);
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -08001099 spin_lock(&port->port_lock_dl);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001100
1101 d = &port->data_ch;
1102
1103 d->to_host = 0;
1104 d->to_modem = 0;
1105 d->pending_with_bam = 0;
1106 d->tohost_drp_cnt = 0;
1107 d->tomodem_drp_cnt = 0;
1108
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -08001109 spin_unlock(&port->port_lock_dl);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +05301110 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001111 }
1112 return count;
1113}
1114
/*
 * file_operations for the debugfs "status" entry created in
 * gbam_debugfs_init(): read dumps per-port stats, write resets them.
 */
1115const struct file_operations gbam_stats_ops = {
1116 .read = gbam_read_stats,
1117 .write = gbam_reset_stats,
1118};
1119
1120static void gbam_debugfs_init(void)
1121{
1122 struct dentry *dent;
1123 struct dentry *dfile;
1124
1125 dent = debugfs_create_dir("usb_rmnet", 0);
1126 if (IS_ERR(dent))
1127 return;
1128
1129 /* TODO: Implement cleanup function to remove created file */
1130 dfile = debugfs_create_file("status", 0444, dent, 0, &gbam_stats_ops);
1131 if (!dfile || IS_ERR(dfile))
1132 debugfs_remove(dent);
1133}
1134#else
1135static void gam_debugfs_init(void) { }
1136#endif
1137
Ofir Cohen77848d62012-12-05 13:16:10 +02001138void gbam_disconnect(struct grmnet *gr, u8 port_num, enum transport_type trans)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001139{
1140 struct gbam_port *port;
1141 unsigned long flags;
1142 struct bam_ch_info *d;
1143
1144 pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);
1145
Ofir Cohena1c2a872011-12-14 10:26:34 +02001146 if (trans == USB_GADGET_XPORT_BAM &&
1147 port_num >= n_bam_ports) {
1148 pr_err("%s: invalid bam portno#%d\n",
1149 __func__, port_num);
1150 return;
1151 }
1152
Ofir Cohenfdecb602012-11-16 15:50:01 +02001153 if ((trans == USB_GADGET_XPORT_BAM2BAM ||
1154 trans == USB_GADGET_XPORT_BAM2BAM_IPA) &&
Ofir Cohena1c2a872011-12-14 10:26:34 +02001155 port_num >= n_bam2bam_ports) {
1156 pr_err("%s: invalid bam2bam portno#%d\n",
1157 __func__, port_num);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001158 return;
1159 }
1160
1161 if (!gr) {
1162 pr_err("%s: grmnet port is null\n", __func__);
1163 return;
1164 }
Ofir Cohena1c2a872011-12-14 10:26:34 +02001165 if (trans == USB_GADGET_XPORT_BAM)
1166 port = bam_ports[port_num].port;
1167 else
1168 port = bam2bam_ports[port_num];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001169
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001170 d = &port->data_ch;
Ofir Cohena1c2a872011-12-14 10:26:34 +02001171 port->gr = gr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001172
Ido Shayevitzeb8d8312012-09-10 11:13:11 +03001173 if (trans == USB_GADGET_XPORT_BAM)
Ofir Cohena1c2a872011-12-14 10:26:34 +02001174 gbam_free_buffers(port);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001175
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +05301176 spin_lock_irqsave(&port->port_lock_ul, flags);
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -08001177 spin_lock(&port->port_lock_dl);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +05301178 port->port_usb = 0;
Vijayavardhan Vennapusa08c31252011-12-21 13:02:49 +05301179 n_tx_req_queued = 0;
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -08001180 spin_unlock(&port->port_lock_dl);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +05301181 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001182
Ido Shayevitzeb8d8312012-09-10 11:13:11 +03001183 /* disable endpoints */
1184 usb_ep_disable(gr->out);
1185 usb_ep_disable(gr->in);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001186
Ido Shayevitzeb8d8312012-09-10 11:13:11 +03001187 gr->in->driver_data = NULL;
1188 gr->out->driver_data = NULL;
1189
Ofir Cohenfdecb602012-11-16 15:50:01 +02001190 if (trans == USB_GADGET_XPORT_BAM ||
1191 trans == USB_GADGET_XPORT_BAM2BAM_IPA)
Ido Shayevitzeb8d8312012-09-10 11:13:11 +03001192 queue_work(gbam_wq, &port->disconnect_w);
Amit Blay94525352012-12-24 11:23:27 +02001193 else if (trans == USB_GADGET_XPORT_BAM2BAM) {
1194 if (port_num == 0) {
1195 if (usb_bam_client_ready(false)) {
1196 pr_err("%s: usb_bam_client_ready failed\n",
1197 __func__);
1198 }
1199 }
1200 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001201}
1202
Ofir Cohena1c2a872011-12-14 10:26:34 +02001203int gbam_connect(struct grmnet *gr, u8 port_num,
Ofir Cohen77848d62012-12-05 13:16:10 +02001204 enum transport_type trans, u8 connection_idx)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001205{
1206 struct gbam_port *port;
1207 struct bam_ch_info *d;
1208 int ret;
1209 unsigned long flags;
1210
1211 pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);
1212
Ofir Cohena1c2a872011-12-14 10:26:34 +02001213 if (trans == USB_GADGET_XPORT_BAM && port_num >= n_bam_ports) {
1214 pr_err("%s: invalid portno#%d\n", __func__, port_num);
1215 return -ENODEV;
1216 }
1217
Ofir Cohenfdecb602012-11-16 15:50:01 +02001218 if ((trans == USB_GADGET_XPORT_BAM2BAM ||
1219 trans == USB_GADGET_XPORT_BAM2BAM_IPA)
1220 && port_num >= n_bam2bam_ports) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001221 pr_err("%s: invalid portno#%d\n", __func__, port_num);
1222 return -ENODEV;
1223 }
1224
1225 if (!gr) {
1226 pr_err("%s: grmnet port is null\n", __func__);
1227 return -ENODEV;
1228 }
1229
Ofir Cohena1c2a872011-12-14 10:26:34 +02001230 if (trans == USB_GADGET_XPORT_BAM)
1231 port = bam_ports[port_num].port;
1232 else
1233 port = bam2bam_ports[port_num];
1234
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001235 d = &port->data_ch;
1236
Ido Shayevitzeb8d8312012-09-10 11:13:11 +03001237 ret = usb_ep_enable(gr->in);
1238 if (ret) {
1239 pr_err("%s: usb_ep_enable failed eptype:IN ep:%p",
1240 __func__, gr->in);
1241 return ret;
1242 }
1243 gr->in->driver_data = port;
Ofir Cohen4da266f2012-01-03 10:19:29 +02001244
Ido Shayevitzeb8d8312012-09-10 11:13:11 +03001245 ret = usb_ep_enable(gr->out);
1246 if (ret) {
1247 pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p",
1248 __func__, gr->out);
1249 gr->in->driver_data = 0;
1250 return ret;
1251 }
1252 gr->out->driver_data = port;
Ofir Cohen4da266f2012-01-03 10:19:29 +02001253
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +05301254 spin_lock_irqsave(&port->port_lock_ul, flags);
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -08001255 spin_lock(&port->port_lock_dl);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +05301256 port->port_usb = gr;
Ofir Cohen4da266f2012-01-03 10:19:29 +02001257
Ido Shayevitzeb8d8312012-09-10 11:13:11 +03001258 if (trans == USB_GADGET_XPORT_BAM) {
Ofir Cohena1c2a872011-12-14 10:26:34 +02001259 d->to_host = 0;
1260 d->to_modem = 0;
1261 d->pending_with_bam = 0;
1262 d->tohost_drp_cnt = 0;
1263 d->tomodem_drp_cnt = 0;
Ido Shayevitzeb8d8312012-09-10 11:13:11 +03001264 }
1265
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -08001266 spin_unlock(&port->port_lock_dl);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +05301267 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001268
Ofir Cohen4da266f2012-01-03 10:19:29 +02001269 if (trans == USB_GADGET_XPORT_BAM2BAM) {
1270 port->gr = gr;
Ofir Cohena1c2a872011-12-14 10:26:34 +02001271 d->connection_idx = connection_idx;
Ofir Cohenfdecb602012-11-16 15:50:01 +02001272 } else if (trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
Ofir Cohenfdecb602012-11-16 15:50:01 +02001273 port->gr = gr;
Ofir Cohen77848d62012-12-05 13:16:10 +02001274 d->ipa_params.src_pipe = &(d->src_pipe_idx);
1275 d->ipa_params.dst_pipe = &(d->dst_pipe_idx);
1276 d->ipa_params.idx = connection_idx;
Ofir Cohen4da266f2012-01-03 10:19:29 +02001277 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001278
Ofir Cohenfdecb602012-11-16 15:50:01 +02001279 d->trans = trans;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001280 queue_work(gbam_wq, &port->connect_w);
1281
1282 return 0;
1283}
1284
Ofir Cohena1c2a872011-12-14 10:26:34 +02001285int gbam_setup(unsigned int no_bam_port, unsigned int no_bam2bam_port)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001286{
1287 int i;
1288 int ret;
1289
Ofir Cohena1c2a872011-12-14 10:26:34 +02001290 pr_debug("%s: requested BAM ports:%d and BAM2BAM ports:%d\n",
1291 __func__, no_bam_port, no_bam2bam_port);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001292
Ofir Cohena1c2a872011-12-14 10:26:34 +02001293 if ((!no_bam_port && !no_bam2bam_port) || no_bam_port > BAM_N_PORTS
1294 || no_bam2bam_port > BAM2BAM_N_PORTS) {
1295 pr_err("%s: Invalid num of ports count:%d,%d\n",
1296 __func__, no_bam_port, no_bam2bam_port);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001297 return -EINVAL;
1298 }
1299
1300 gbam_wq = alloc_workqueue("k_gbam", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
1301 if (!gbam_wq) {
1302 pr_err("%s: Unable to create workqueue gbam_wq\n",
1303 __func__);
1304 return -ENOMEM;
1305 }
1306
Ofir Cohena1c2a872011-12-14 10:26:34 +02001307 for (i = 0; i < no_bam_port; i++) {
Manu Gautamd59b5d32011-09-09 14:47:08 +05301308 n_bam_ports++;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001309 ret = gbam_port_alloc(i);
1310 if (ret) {
Manu Gautamd59b5d32011-09-09 14:47:08 +05301311 n_bam_ports--;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001312 pr_err("%s: Unable to alloc port:%d\n", __func__, i);
1313 goto free_bam_ports;
1314 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001315 }
1316
Ofir Cohena1c2a872011-12-14 10:26:34 +02001317 for (i = 0; i < no_bam2bam_port; i++) {
1318 n_bam2bam_ports++;
1319 ret = gbam2bam_port_alloc(i);
1320 if (ret) {
1321 n_bam2bam_ports--;
1322 pr_err("%s: Unable to alloc port:%d\n", __func__, i);
1323 goto free_bam_ports;
1324 }
1325 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001326 gbam_debugfs_init();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001327 return 0;
Ofir Cohena1c2a872011-12-14 10:26:34 +02001328
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001329free_bam_ports:
1330 for (i = 0; i < n_bam_ports; i++)
1331 gbam_port_free(i);
Ofir Cohena1c2a872011-12-14 10:26:34 +02001332 for (i = 0; i < n_bam2bam_ports; i++)
1333 gbam2bam_port_free(i);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001334 destroy_workqueue(gbam_wq);
1335
1336 return ret;
1337}
Amit Blaye5bb35e2012-05-08 20:38:20 +03001338
1339static int gbam_wake_cb(void *param)
1340{
1341 struct gbam_port *port = (struct gbam_port *)param;
1342 struct bam_ch_info *d;
1343 struct f_rmnet *dev;
1344
1345 dev = port_to_rmnet(port->gr);
1346 d = &port->data_ch;
1347
1348 pr_debug("%s: woken up by peer\n", __func__);
1349
1350 return usb_gadget_wakeup(dev->cdev->gadget);
1351}
1352
1353void gbam_suspend(struct grmnet *gr, u8 port_num, enum transport_type trans)
1354{
1355 struct gbam_port *port;
1356 struct bam_ch_info *d;
1357
Ofir Cohenfdecb602012-11-16 15:50:01 +02001358 if (trans != USB_GADGET_XPORT_BAM2BAM &&
1359 trans != USB_GADGET_XPORT_BAM2BAM_IPA)
Amit Blaye5bb35e2012-05-08 20:38:20 +03001360 return;
1361
1362 port = bam2bam_ports[port_num];
1363 d = &port->data_ch;
1364
1365 pr_debug("%s: suspended port %d\n", __func__, port_num);
1366
1367 usb_bam_register_wake_cb(d->connection_idx, gbam_wake_cb, port);
1368}
1369
1370void gbam_resume(struct grmnet *gr, u8 port_num, enum transport_type trans)
1371{
1372 struct gbam_port *port;
1373 struct bam_ch_info *d;
1374
Ofir Cohenfdecb602012-11-16 15:50:01 +02001375 if (trans != USB_GADGET_XPORT_BAM2BAM &&
1376 trans != USB_GADGET_XPORT_BAM2BAM_IPA)
Amit Blaye5bb35e2012-05-08 20:38:20 +03001377 return;
1378
1379 port = bam2bam_ports[port_num];
1380 d = &port->data_ch;
1381
1382 pr_debug("%s: resumed port %d\n", __func__, port_num);
1383
1384 usb_bam_register_wake_cb(d->connection_idx, NULL, NULL);
1385}