blob: 9dd9978c160774a0b0ce95914dcda32c5e35e1ba [file] [log] [blame]
Chiranjeevi Velempatia06b2232013-01-04 10:10:52 +05301/* Copyright (c) 2011-2013, Linux Foundation. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/kernel.h>
14#include <linux/interrupt.h>
15#include <linux/device.h>
16#include <linux/delay.h>
17#include <linux/slab.h>
18#include <linux/termios.h>
19#include <mach/msm_smd.h>
20#include <linux/netdevice.h>
21#include <mach/bam_dmux.h>
22#include <linux/debugfs.h>
23#include <linux/bitops.h>
24#include <linux/termios.h>
25
Ofir Cohena1c2a872011-12-14 10:26:34 +020026#include <mach/usb_gadget_xport.h>
Shimrit Malichi194fe122012-07-25 13:50:41 +030027#include <linux/usb/msm_hsusb.h>
Ofir Cohena1c2a872011-12-14 10:26:34 +020028#include <mach/usb_bam.h>
29
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070030#include "u_rmnet.h"
31
32#define BAM_N_PORTS 1
Anna Perel21515162012-02-02 20:50:02 +020033#define BAM2BAM_N_PORTS 3
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070034
35static struct workqueue_struct *gbam_wq;
36static int n_bam_ports;
Ofir Cohena1c2a872011-12-14 10:26:34 +020037static int n_bam2bam_ports;
Vijayavardhan Vennapusa08c31252011-12-21 13:02:49 +053038static unsigned n_tx_req_queued;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070039static unsigned bam_ch_ids[] = { 8 };
40
Jack Phameffd4ae2011-08-03 16:49:36 -070041static const char *bam_ch_names[] = { "bam_dmux_ch_8" };
42
Vamsi Krishna84579552011-11-09 15:33:22 -080043#define BAM_PENDING_LIMIT 220
Vamsi Krishna8f24f252011-11-02 11:46:08 -070044#define BAM_MUX_TX_PKT_DROP_THRESHOLD 1000
Vamsi Krishna84579552011-11-09 15:33:22 -080045#define BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD 500
46#define BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD 300
Vamsi Krishna8f24f252011-11-02 11:46:08 -070047#define BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT 1
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070048
49#define BAM_MUX_HDR 8
50
Vamsi Krishna8f24f252011-11-02 11:46:08 -070051#define BAM_MUX_RX_Q_SIZE 16
52#define BAM_MUX_TX_Q_SIZE 200
Manu Gautam15203302012-09-26 11:12:54 +053053#define BAM_MUX_RX_REQ_SIZE 2048 /* Must be 1KB aligned */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070054
Vijayavardhan Vennapusa08c31252011-12-21 13:02:49 +053055#define DL_INTR_THRESHOLD 20
56
Vamsi Krishna8f24f252011-11-02 11:46:08 -070057unsigned int bam_mux_tx_pkt_drop_thld = BAM_MUX_TX_PKT_DROP_THRESHOLD;
58module_param(bam_mux_tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070059
Vamsi Krishna8f24f252011-11-02 11:46:08 -070060unsigned int bam_mux_rx_fctrl_en_thld = BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD;
61module_param(bam_mux_rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070062
Vamsi Krishna8f24f252011-11-02 11:46:08 -070063unsigned int bam_mux_rx_fctrl_support = BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT;
64module_param(bam_mux_rx_fctrl_support, uint, S_IRUGO | S_IWUSR);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070065
Vamsi Krishna8f24f252011-11-02 11:46:08 -070066unsigned int bam_mux_rx_fctrl_dis_thld = BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD;
67module_param(bam_mux_rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070068
Vamsi Krishna8f24f252011-11-02 11:46:08 -070069unsigned int bam_mux_tx_q_size = BAM_MUX_TX_Q_SIZE;
70module_param(bam_mux_tx_q_size, uint, S_IRUGO | S_IWUSR);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070071
Vamsi Krishna8f24f252011-11-02 11:46:08 -070072unsigned int bam_mux_rx_q_size = BAM_MUX_RX_Q_SIZE;
73module_param(bam_mux_rx_q_size, uint, S_IRUGO | S_IWUSR);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070074
Vamsi Krishna8f24f252011-11-02 11:46:08 -070075unsigned int bam_mux_rx_req_size = BAM_MUX_RX_REQ_SIZE;
76module_param(bam_mux_rx_req_size, uint, S_IRUGO | S_IWUSR);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070077
Vijayavardhan Vennapusa08c31252011-12-21 13:02:49 +053078unsigned int dl_intr_threshold = DL_INTR_THRESHOLD;
79module_param(dl_intr_threshold, uint, S_IRUGO | S_IWUSR);
80
Jack Phameffd4ae2011-08-03 16:49:36 -070081#define BAM_CH_OPENED BIT(0)
82#define BAM_CH_READY BIT(1)
Ofir Cohena1c2a872011-12-14 10:26:34 +020083
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070084struct bam_ch_info {
Jack Phameffd4ae2011-08-03 16:49:36 -070085 unsigned long flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070086 unsigned id;
87
88 struct list_head tx_idle;
89 struct sk_buff_head tx_skb_q;
90
91 struct list_head rx_idle;
92 struct sk_buff_head rx_skb_q;
93
94 struct gbam_port *port;
95 struct work_struct write_tobam_w;
Vijayavardhan Vennapusa929e5792011-12-12 17:34:53 +053096 struct work_struct write_tohost_w;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070097
Ofir Cohena1c2a872011-12-14 10:26:34 +020098 struct usb_request *rx_req;
99 struct usb_request *tx_req;
100
Shimrit Malichi255b5342012-08-02 21:01:43 +0300101 u32 src_pipe_idx;
102 u32 dst_pipe_idx;
Shimrit Malichidbf43d72013-03-16 03:32:27 +0200103 u8 src_connection_idx;
104 u8 dst_connection_idx;
Ofir Cohenfdecb602012-11-16 15:50:01 +0200105 enum transport_type trans;
Ofir Cohen77848d62012-12-05 13:16:10 +0200106 struct usb_bam_connect_ipa_params ipa_params;
Ofir Cohena1c2a872011-12-14 10:26:34 +0200107
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700108 /* stats */
109 unsigned int pending_with_bam;
110 unsigned int tohost_drp_cnt;
111 unsigned int tomodem_drp_cnt;
112 unsigned int tx_len;
113 unsigned int rx_len;
114 unsigned long to_modem;
115 unsigned long to_host;
116};
117
118struct gbam_port {
119 unsigned port_num;
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530120 spinlock_t port_lock_ul;
121 spinlock_t port_lock_dl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700122
123 struct grmnet *port_usb;
Ofir Cohena1c2a872011-12-14 10:26:34 +0200124 struct grmnet *gr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700125
126 struct bam_ch_info data_ch;
127
128 struct work_struct connect_w;
Vamsi Krishna1ad076d2011-11-10 15:03:30 -0800129 struct work_struct disconnect_w;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700130};
131
132static struct bam_portmaster {
133 struct gbam_port *port;
Jack Phameffd4ae2011-08-03 16:49:36 -0700134 struct platform_driver pdrv;
135} bam_ports[BAM_N_PORTS];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700136
Ofir Cohena1c2a872011-12-14 10:26:34 +0200137struct gbam_port *bam2bam_ports[BAM2BAM_N_PORTS];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700138static void gbam_start_rx(struct gbam_port *port);
Ofir Cohena1c2a872011-12-14 10:26:34 +0200139static void gbam_start_endless_rx(struct gbam_port *port);
140static void gbam_start_endless_tx(struct gbam_port *port);
Amit Blay94525352012-12-24 11:23:27 +0200141static int gbam_peer_reset_cb(void *param);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700142
143/*---------------misc functions---------------- */
144static void gbam_free_requests(struct usb_ep *ep, struct list_head *head)
145{
146 struct usb_request *req;
147
148 while (!list_empty(head)) {
149 req = list_entry(head->next, struct usb_request, list);
150 list_del(&req->list);
151 usb_ep_free_request(ep, req);
152 }
153}
154
155static int gbam_alloc_requests(struct usb_ep *ep, struct list_head *head,
156 int num,
157 void (*cb)(struct usb_ep *ep, struct usb_request *),
158 gfp_t flags)
159{
160 int i;
161 struct usb_request *req;
162
163 pr_debug("%s: ep:%p head:%p num:%d cb:%p", __func__,
164 ep, head, num, cb);
165
166 for (i = 0; i < num; i++) {
167 req = usb_ep_alloc_request(ep, flags);
168 if (!req) {
169 pr_debug("%s: req allocated:%d\n", __func__, i);
170 return list_empty(head) ? -ENOMEM : 0;
171 }
172 req->complete = cb;
173 list_add(&req->list, head);
174 }
175
176 return 0;
177}
178/*--------------------------------------------- */
179
180/*------------data_path----------------------------*/
/*
 * Push queued DL (modem -> host) packets to the USB host.
 *
 * Pops skbs off d->tx_skb_q, attaches each to an idle IN request from
 * d->tx_idle and queues it on the IN endpoint.  To reduce interrupt load,
 * only every dl_intr_threshold-th request requests a completion interrupt
 * (req->no_interrupt moderation via the global n_tx_req_queued counter).
 * Runs under port_lock_dl; the lock is dropped around usb_ep_queue() and
 * re-taken afterwards.
 */
static void gbam_write_data_tohost(struct gbam_port *port)
{
	unsigned long flags;
	struct bam_ch_info *d = &port->data_ch;
	struct sk_buff *skb;
	int ret;
	struct usb_request *req;
	struct usb_ep *ep;

	spin_lock_irqsave(&port->port_lock_dl, flags);
	if (!port->port_usb) {
		/* function disconnected; nothing to write */
		spin_unlock_irqrestore(&port->port_lock_dl, flags);
		return;
	}

	ep = port->port_usb->in;

	/* pair each pending skb with an idle IN request */
	while (!list_empty(&d->tx_idle)) {
		skb = __skb_dequeue(&d->tx_skb_q);
		if (!skb) {
			spin_unlock_irqrestore(&port->port_lock_dl, flags);
			return;
		}
		req = list_first_entry(&d->tx_idle,
				struct usb_request,
				list);
		req->context = skb;
		req->buf = skb->data;
		req->length = skb->len;
		/* interrupt only once per dl_intr_threshold completions */
		n_tx_req_queued++;
		if (n_tx_req_queued == dl_intr_threshold) {
			req->no_interrupt = 0;
			n_tx_req_queued = 0;
		} else {
			req->no_interrupt = 1;
		}

		list_del(&req->list);

		/* drop the lock across the endpoint queue call */
		spin_unlock(&port->port_lock_dl);
		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
		spin_lock(&port->port_lock_dl);
		if (ret) {
			pr_err("%s: usb epIn failed with %d\n", __func__, ret);
			/* put the request back and drop the packet */
			list_add(&req->list, &d->tx_idle);
			dev_kfree_skb_any(skb);
			break;
		}
		d->to_host++;
	}
	spin_unlock_irqrestore(&port->port_lock_dl, flags);
}
233
Chiranjeevi Velempatie5105922012-01-19 12:25:26 +0530234static void gbam_write_data_tohost_w(struct work_struct *w)
235{
236 struct bam_ch_info *d;
237 struct gbam_port *port;
238
239 d = container_of(w, struct bam_ch_info, write_tohost_w);
240 port = d->port;
241
242 gbam_write_data_tohost(port);
243}
244
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700245void gbam_data_recv_cb(void *p, struct sk_buff *skb)
246{
247 struct gbam_port *port = p;
248 struct bam_ch_info *d = &port->data_ch;
249 unsigned long flags;
250
251 if (!skb)
252 return;
253
254 pr_debug("%s: p:%p#%d d:%p skb_len:%d\n", __func__,
255 port, port->port_num, d, skb->len);
256
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530257 spin_lock_irqsave(&port->port_lock_dl, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700258 if (!port->port_usb) {
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530259 spin_unlock_irqrestore(&port->port_lock_dl, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700260 dev_kfree_skb_any(skb);
261 return;
262 }
263
Vamsi Krishna8f24f252011-11-02 11:46:08 -0700264 if (d->tx_skb_q.qlen > bam_mux_tx_pkt_drop_thld) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700265 d->tohost_drp_cnt++;
266 if (printk_ratelimit())
267 pr_err("%s: tx pkt dropped: tx_drop_cnt:%u\n",
268 __func__, d->tohost_drp_cnt);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530269 spin_unlock_irqrestore(&port->port_lock_dl, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700270 dev_kfree_skb_any(skb);
271 return;
272 }
273
274 __skb_queue_tail(&d->tx_skb_q, skb);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530275 spin_unlock_irqrestore(&port->port_lock_dl, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700276
Chiranjeevi Velempatie5105922012-01-19 12:25:26 +0530277 gbam_write_data_tohost(port);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700278}
279
280void gbam_data_write_done(void *p, struct sk_buff *skb)
281{
282 struct gbam_port *port = p;
283 struct bam_ch_info *d = &port->data_ch;
284 unsigned long flags;
285
286 if (!skb)
287 return;
288
289 dev_kfree_skb_any(skb);
290
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530291 spin_lock_irqsave(&port->port_lock_ul, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700292
293 d->pending_with_bam--;
294
295 pr_debug("%s: port:%p d:%p tom:%lu pbam:%u, pno:%d\n", __func__,
296 port, d, d->to_modem,
297 d->pending_with_bam, port->port_num);
298
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530299 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700300
Vamsi Krishna84579552011-11-09 15:33:22 -0800301 queue_work(gbam_wq, &d->write_tobam_w);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700302}
303
/*
 * Work item: move UL (host -> modem) skbs from rx_skb_q into the BAM DMUX
 * channel, keeping at most BAM_PENDING_LIMIT packets outstanding.  When the
 * remaining backlog drops below the flow-control disable threshold, USB RX
 * is re-armed via gbam_start_rx().
 */
static void gbam_data_write_tobam(struct work_struct *w)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	struct sk_buff *skb;
	unsigned long flags;
	int ret;
	int qlen;

	d = container_of(w, struct bam_ch_info, write_tobam_w);
	port = d->port;

	spin_lock_irqsave(&port->port_lock_ul, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}

	while (d->pending_with_bam < BAM_PENDING_LIMIT) {
		skb = __skb_dequeue(&d->rx_skb_q);
		if (!skb)
			break;

		/* account optimistically before dropping the lock */
		d->pending_with_bam++;
		d->to_modem++;

		pr_debug("%s: port:%p d:%p tom:%lu pbam:%u pno:%d\n", __func__,
				port, d, d->to_modem, d->pending_with_bam,
				port->port_num);

		/* DMUX write is made with the lock dropped */
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		ret = msm_bam_dmux_write(d->id, skb);
		spin_lock_irqsave(&port->port_lock_ul, flags);
		if (ret) {
			pr_debug("%s: write error:%d\n", __func__, ret);
			/* undo the accounting; count and free the dropped pkt */
			d->pending_with_bam--;
			d->to_modem--;
			d->tomodem_drp_cnt++;
			dev_kfree_skb_any(skb);
			break;
		}
	}

	qlen = d->rx_skb_q.qlen;

	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	/* backlog low enough again: resume USB OUT traffic */
	if (qlen < BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD)
		gbam_start_rx(port);
}
354/*-------------------------------------------------------------*/
355
/*
 * IN (DL) endpoint completion: free the transmitted skb, return the
 * request to the tx_idle pool and schedule more DL writes.  On cable
 * disconnect the request itself is freed instead of being recycled.
 */
static void gbam_epin_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gbam_port *port = ep->driver_data;
	struct bam_ch_info *d;
	struct sk_buff *skb = req->context;
	int status = req->status;

	switch (status) {
	case 0:
		/* successful completion */
		break;
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		dev_kfree_skb_any(skb);
		usb_ep_free_request(ep, req);
		return;
	default:
		pr_err("%s: data tx ep error %d\n",
				__func__, status);
		break;
	}

	dev_kfree_skb_any(skb);

	/* NOTE(review): port comes from ep->driver_data and may be NULL here */
	if (!port)
		return;

	spin_lock(&port->port_lock_dl);
	d = &port->data_ch;
	list_add_tail(&req->list, &d->tx_idle);
	spin_unlock(&port->port_lock_dl);

	queue_work(gbam_wq, &d->write_tohost_w);
}
391
/*
 * OUT (UL) endpoint completion: hand the received skb to the UL pump,
 * then recycle the request — either park it on rx_idle (flow control or
 * allocation failure) or rearm it with a fresh skb and requeue it.
 */
static void
gbam_epout_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gbam_port *port = ep->driver_data;
	struct bam_ch_info *d = &port->data_ch;
	struct sk_buff *skb = req->context;
	int status = req->status;
	int queue = 0;

	switch (status) {
	case 0:
		/* good data: set the actual received length on the skb */
		skb_put(skb, req->actual);
		queue = 1;
		break;
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* cable disconnection */
		dev_kfree_skb_any(skb);
		req->buf = 0;
		usb_ep_free_request(ep, req);
		return;
	default:
		if (printk_ratelimit())
			pr_err("%s: %s response error %d, %d/%d\n",
				__func__, ep->name, status,
				req->actual, req->length);
		dev_kfree_skb_any(skb);
		break;
	}

	spin_lock(&port->port_lock_ul);
	if (queue) {
		__skb_queue_tail(&d->rx_skb_q, skb);
		queue_work(gbam_wq, &d->write_tobam_w);
	}

	/* TODO: Handle flow control gracefully by having
	 * having call back mechanism from bam driver
	 */
	if (bam_mux_rx_fctrl_support &&
		d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld) {

		/* UL backlog too deep: park the request instead of rearming */
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock_ul);
		return;
	}
	spin_unlock(&port->port_lock_ul);

	/* rearm: allocate a fresh receive buffer for this request */
	skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
	if (!skb) {
		spin_lock(&port->port_lock_ul);
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock_ul);
		return;
	}
	/* leave headroom for the BAM mux header */
	skb_reserve(skb, BAM_MUX_HDR);

	req->buf = skb->data;
	req->length = bam_mux_rx_req_size;
	req->context = skb;

	status = usb_ep_queue(ep, req, GFP_ATOMIC);
	if (status) {
		dev_kfree_skb_any(skb);

		if (printk_ratelimit())
			pr_err("%s: data rx enqueue err %d\n",
					__func__, status);

		spin_lock(&port->port_lock_ul);
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock_ul);
	}
}
466
Ofir Cohena1c2a872011-12-14 10:26:34 +0200467static void gbam_endless_rx_complete(struct usb_ep *ep, struct usb_request *req)
468{
469 int status = req->status;
470
471 pr_debug("%s status: %d\n", __func__, status);
472}
473
474static void gbam_endless_tx_complete(struct usb_ep *ep, struct usb_request *req)
475{
476 int status = req->status;
477
478 pr_debug("%s status: %d\n", __func__, status);
479}
480
/*
 * Arm USB OUT (UL) reception: attach a fresh skb to each idle OUT request
 * and queue it.  Stops early when flow control is active and the UL
 * backlog is above the enable threshold, or when skb allocation fails.
 * port_lock_ul is dropped around usb_ep_queue() and port->port_usb is
 * re-checked afterwards since the port may disconnect meanwhile.
 */
static void gbam_start_rx(struct gbam_port *port)
{
	struct usb_request *req;
	struct bam_ch_info *d;
	struct usb_ep *ep;
	unsigned long flags;
	int ret;
	struct sk_buff *skb;

	spin_lock_irqsave(&port->port_lock_ul, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}

	d = &port->data_ch;
	ep = port->port_usb->out;

	while (port->port_usb && !list_empty(&d->rx_idle)) {

		/* respect UL flow control: stop arming above the threshold */
		if (bam_mux_rx_fctrl_support &&
			d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld)
			break;

		req = list_first_entry(&d->rx_idle, struct usb_request, list);

		skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
		if (!skb)
			break;
		/* leave headroom for the BAM mux header */
		skb_reserve(skb, BAM_MUX_HDR);

		list_del(&req->list);
		req->buf = skb->data;
		req->length = bam_mux_rx_req_size;
		req->context = skb;

		/* drop the lock across the endpoint queue call */
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
		spin_lock_irqsave(&port->port_lock_ul, flags);
		if (ret) {
			dev_kfree_skb_any(skb);

			if (printk_ratelimit())
				pr_err("%s: rx queue failed %d\n",
						__func__, ret);

			/* port may have disconnected while unlocked */
			if (port->port_usb)
				list_add(&req->list, &d->rx_idle);
			else
				usb_ep_free_request(ep, req);
			break;
		}
	}
	spin_unlock_irqrestore(&port->port_lock_ul, flags);
}
536
Ofir Cohena1c2a872011-12-14 10:26:34 +0200537static void gbam_start_endless_rx(struct gbam_port *port)
538{
539 struct bam_ch_info *d = &port->data_ch;
540 int status;
541
Lena Salman05b544f2013-05-13 15:49:10 +0300542 if (!port->port_usb) {
543 pr_err("%s: port->port_usb is NULL", __func__);
Amit Blay94525352012-12-24 11:23:27 +0200544 return;
Lena Salman05b544f2013-05-13 15:49:10 +0300545 }
Amit Blay94525352012-12-24 11:23:27 +0200546
Lena Salman05b544f2013-05-13 15:49:10 +0300547 pr_debug("%s: enqueue\n", __func__);
Ofir Cohena1c2a872011-12-14 10:26:34 +0200548 status = usb_ep_queue(port->port_usb->out, d->rx_req, GFP_ATOMIC);
549 if (status)
550 pr_err("%s: error enqueuing transfer, %d\n", __func__, status);
551}
552
553static void gbam_start_endless_tx(struct gbam_port *port)
554{
555 struct bam_ch_info *d = &port->data_ch;
556 int status;
557
Lena Salman05b544f2013-05-13 15:49:10 +0300558 if (!port->port_usb) {
559 pr_err("%s: port->port_usb is NULL", __func__);
Amit Blay94525352012-12-24 11:23:27 +0200560 return;
Lena Salman05b544f2013-05-13 15:49:10 +0300561 }
Amit Blay94525352012-12-24 11:23:27 +0200562
Lena Salman05b544f2013-05-13 15:49:10 +0300563 pr_debug("%s: enqueue\n", __func__);
Ofir Cohena1c2a872011-12-14 10:26:34 +0200564 status = usb_ep_queue(port->port_usb->in, d->tx_req, GFP_ATOMIC);
565 if (status)
566 pr_err("%s: error enqueuing transfer, %d\n", __func__, status);
567}
568
Lena Salman05b544f2013-05-13 15:49:10 +0300569static void gbam_stop_endless_rx(struct gbam_port *port)
570{
571 struct bam_ch_info *d = &port->data_ch;
572 int status;
573
574 if (!port->port_usb) {
575 pr_err("%s: port->port_usb is NULL", __func__);
576 return;
577 }
578 pr_debug("%s: dequeue\n", __func__);
579
580 status = usb_ep_dequeue(port->port_usb->out, d->rx_req);
581 if (status)
582 pr_err("%s: error dequeuing transfer, %d\n", __func__, status);
583
584}
585static void gbam_stop_endless_tx(struct gbam_port *port)
586{
587 struct bam_ch_info *d = &port->data_ch;
588 int status;
589
590 if (!port->port_usb) {
591 pr_err("%s: port->port_usb is NULL", __func__);
592 return;
593 }
594
595 pr_debug("%s: dequeue\n", __func__);
596 status = usb_ep_dequeue(port->port_usb->in, d->tx_req);
597 if (status)
598 pr_err("%s: error dequeuing transfer, %d\n", __func__, status);
599}
600
601static void gbam_start(void *param, enum usb_bam_pipe_dir dir)
602{
603 struct gbam_port *port = param;
604
605 if (dir == USB_TO_PEER_PERIPHERAL)
606 gbam_start_endless_rx(port);
607 else
608 gbam_start_endless_tx(port);
609}
610
611static void gbam_stop(void *param, enum usb_bam_pipe_dir dir)
612{
613 struct gbam_port *port = param;
614
615 if (dir == USB_TO_PEER_PERIPHERAL)
616 gbam_stop_endless_rx(port);
617 else
618 gbam_stop_endless_tx(port);
619}
620
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700621static void gbam_start_io(struct gbam_port *port)
622{
623 unsigned long flags;
624 struct usb_ep *ep;
625 int ret;
626 struct bam_ch_info *d;
627
628 pr_debug("%s: port:%p\n", __func__, port);
629
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530630 spin_lock_irqsave(&port->port_lock_ul, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700631 if (!port->port_usb) {
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530632 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700633 return;
634 }
635
636 d = &port->data_ch;
637 ep = port->port_usb->out;
Vamsi Krishna8f24f252011-11-02 11:46:08 -0700638 ret = gbam_alloc_requests(ep, &d->rx_idle, bam_mux_rx_q_size,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700639 gbam_epout_complete, GFP_ATOMIC);
640 if (ret) {
641 pr_err("%s: rx req allocation failed\n", __func__);
642 return;
643 }
644
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530645 spin_unlock_irqrestore(&port->port_lock_ul, flags);
646 spin_lock_irqsave(&port->port_lock_dl, flags);
Chiranjeevi Velempati44d02982013-02-17 22:09:08 +0530647 if (!port->port_usb) {
648 gbam_free_requests(ep, &d->rx_idle);
649 spin_unlock_irqrestore(&port->port_lock_dl, flags);
650 return;
651 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700652 ep = port->port_usb->in;
Vamsi Krishna8f24f252011-11-02 11:46:08 -0700653 ret = gbam_alloc_requests(ep, &d->tx_idle, bam_mux_tx_q_size,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700654 gbam_epin_complete, GFP_ATOMIC);
655 if (ret) {
656 pr_err("%s: tx req allocation failed\n", __func__);
657 gbam_free_requests(ep, &d->rx_idle);
658 return;
659 }
660
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530661 spin_unlock_irqrestore(&port->port_lock_dl, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700662
663 /* queue out requests */
664 gbam_start_rx(port);
665}
666
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600667static void gbam_notify(void *p, int event, unsigned long data)
668{
669 switch (event) {
670 case BAM_DMUX_RECEIVE:
671 gbam_data_recv_cb(p, (struct sk_buff *)(data));
672 break;
673 case BAM_DMUX_WRITE_DONE:
674 gbam_data_write_done(p, (struct sk_buff *)(data));
675 break;
676 }
677}
678
Ofir Cohena1c2a872011-12-14 10:26:34 +0200679static void gbam_free_buffers(struct gbam_port *port)
680{
681 struct sk_buff *skb;
682 unsigned long flags;
683 struct bam_ch_info *d;
684
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530685 spin_lock_irqsave(&port->port_lock_ul, flags);
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -0800686 spin_lock(&port->port_lock_dl);
Ofir Cohena1c2a872011-12-14 10:26:34 +0200687
688 if (!port || !port->port_usb)
689 goto free_buf_out;
690
691 d = &port->data_ch;
692
693 gbam_free_requests(port->port_usb->in, &d->tx_idle);
694 gbam_free_requests(port->port_usb->out, &d->rx_idle);
695
696 while ((skb = __skb_dequeue(&d->tx_skb_q)))
697 dev_kfree_skb_any(skb);
698
699 while ((skb = __skb_dequeue(&d->rx_skb_q)))
700 dev_kfree_skb_any(skb);
701
702free_buf_out:
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -0800703 spin_unlock(&port->port_lock_dl);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530704 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Ofir Cohena1c2a872011-12-14 10:26:34 +0200705}
706
Vamsi Krishna1ad076d2011-11-10 15:03:30 -0800707static void gbam_disconnect_work(struct work_struct *w)
708{
709 struct gbam_port *port =
710 container_of(w, struct gbam_port, disconnect_w);
711 struct bam_ch_info *d = &port->data_ch;
712
713 if (!test_bit(BAM_CH_OPENED, &d->flags))
714 return;
715
716 msm_bam_dmux_close(d->id);
717 clear_bit(BAM_CH_OPENED, &d->flags);
718}
719
Ofir Cohenfdecb602012-11-16 15:50:01 +0200720static void gbam2bam_disconnect_work(struct work_struct *w)
721{
Shimrit Malichi419fdac2013-01-16 14:35:31 +0200722 struct gbam_port *port =
723 container_of(w, struct gbam_port, disconnect_w);
Ofir Cohenfdecb602012-11-16 15:50:01 +0200724 struct bam_ch_info *d = &port->data_ch;
725 int ret;
726
727 if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
Lena Salmanabde35d2013-04-25 15:29:43 +0300728 teth_bridge_disconnect();
Shimrit Malichidbf43d72013-03-16 03:32:27 +0200729 ret = usb_bam_disconnect_ipa(&d->ipa_params);
Ofir Cohenfdecb602012-11-16 15:50:01 +0200730 if (ret)
731 pr_err("%s: usb_bam_disconnect_ipa failed: err:%d\n",
732 __func__, ret);
Ofir Cohenfdecb602012-11-16 15:50:01 +0200733 }
734}
735
/*
 * Work item for the BAM-DMUX transport: verify the port is still bound
 * (under both port locks, nested ul -> dl), open the DMUX channel once it
 * is ready, then allocate and start the USB request pools.
 */
static void gbam_connect_work(struct work_struct *w)
{
	struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
	struct bam_ch_info *d = &port->data_ch;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&port->port_lock_ul, flags);
	spin_lock(&port->port_lock_dl);
	if (!port->port_usb) {
		/* function already gone; nothing to connect */
		spin_unlock(&port->port_lock_dl);
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}
	spin_unlock(&port->port_lock_dl);
	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	/* the platform channel must have probed first */
	if (!test_bit(BAM_CH_READY, &d->flags))
		return;

	ret = msm_bam_dmux_open(d->id, port, gbam_notify);
	if (ret) {
		pr_err("%s: unable open bam ch:%d err:%d\n",
				__func__, d->id, ret);
		return;
	}
	set_bit(BAM_CH_OPENED, &d->flags);

	gbam_start_io(port);

	pr_debug("%s: done\n", __func__);
}
768
/*
 * Worker that brings up the BAM2BAM (or BAM2BAM-over-IPA) data path after
 * gbam_connect() has queued port->connect_w.
 *
 * For plain BAM2BAM it opens the src (host->device) and dst (device->host)
 * SPS pipes.  For the IPA transport it additionally initializes and
 * connects the tethering bridge.  It then allocates the two "endless"
 * USB requests, stamps them with the SPS pipe parameters the MSM UDC
 * expects in udc_priv, and kicks off the endless rx/tx transfers.
 *
 * Runs in workqueue context; bails out early (without cleanup of the
 * already-connected pipes) if any step fails or if the cable was pulled
 * before the requests could be allocated.
 */
static void gbam2bam_connect_work(struct work_struct *w)
{
	struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
	struct teth_bridge_connect_params connect_params;
	struct bam_ch_info *d = &port->data_ch;
	u32 sps_params;
	ipa_notify_cb usb_notify_cb;
	void *priv;
	int ret;
	unsigned long flags;

	if (d->trans == USB_GADGET_XPORT_BAM2BAM) {
		/* Clear any leftover peer-reset state before reconnecting */
		usb_bam_reset_complete();
		/* Open both SPS pipes; indices were stashed by gbam_connect() */
		ret = usb_bam_connect(d->src_connection_idx, &d->src_pipe_idx);
		if (ret) {
			pr_err("%s: usb_bam_connect (src) failed: err:%d\n",
				__func__, ret);
			return;
		}
		ret = usb_bam_connect(d->dst_connection_idx, &d->dst_pipe_idx);
		if (ret) {
			pr_err("%s: usb_bam_connect (dst) failed: err:%d\n",
				__func__, ret);
			return;
		}
	} else if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
		/* Tethering bridge supplies the IPA notify callback/context */
		ret = teth_bridge_init(&usb_notify_cb, &priv);
		if (ret) {
			pr_err("%s:teth_bridge_init() failed\n", __func__);
			return;
		}
		d->ipa_params.notify = usb_notify_cb;
		d->ipa_params.priv = priv;
		d->ipa_params.ipa_ep_cfg.mode.mode = IPA_BASIC;

		/* Producer direction: USB -> peer (modem) */
		d->ipa_params.client = IPA_CLIENT_USB_PROD;
		d->ipa_params.dir = USB_TO_PEER_PERIPHERAL;
		ret = usb_bam_connect_ipa(&d->ipa_params);
		if (ret) {
			pr_err("%s: usb_bam_connect_ipa failed: err:%d\n",
				__func__, ret);
			return;
		}

		/* Consumer direction: peer (modem) -> USB */
		d->ipa_params.client = IPA_CLIENT_USB_CONS;
		d->ipa_params.dir = PEER_PERIPHERAL_TO_USB;
		ret = usb_bam_connect_ipa(&d->ipa_params);
		if (ret) {
			pr_err("%s: usb_bam_connect_ipa failed: err:%d\n",
				__func__, ret);
			return;
		}

		/* Bridge the two IPA pipe handles in RMNET tethering mode */
		connect_params.ipa_usb_pipe_hdl = d->ipa_params.prod_clnt_hdl;
		connect_params.usb_ipa_pipe_hdl = d->ipa_params.cons_clnt_hdl;
		connect_params.tethering_mode = TETH_TETHERING_MODE_RMNET;
		ret = teth_bridge_connect(&connect_params);
		if (ret) {
			pr_err("%s:teth_bridge_connect() failed\n", __func__);
			return;
		}
	}

	/* Both locks taken (ul outer, dl inner) to re-check the cable state
	 * and allocate the requests atomically w.r.t. gbam_disconnect() */
	spin_lock_irqsave(&port->port_lock_ul, flags);
	spin_lock(&port->port_lock_dl);
	if (!port->port_usb) {
		pr_debug("%s: usb cable is disconnected, exiting\n", __func__);
		spin_unlock(&port->port_lock_dl);
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}
	d->rx_req = usb_ep_alloc_request(port->port_usb->out, GFP_ATOMIC);
	if (!d->rx_req) {
		spin_unlock(&port->port_lock_dl);
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		pr_err("%s: out of memory\n", __func__);
		return;
	}

	/* length==0 + MSM_IS_FINITE_TRANSFER cleared => "endless" transfer
	 * handled entirely in HW between the UDC and the BAM pipe */
	d->rx_req->context = port;
	d->rx_req->complete = gbam_endless_rx_complete;
	d->rx_req->length = 0;
	sps_params = (MSM_SPS_MODE | d->src_pipe_idx |
		MSM_VENDOR_ID) & ~MSM_IS_FINITE_TRANSFER;
	d->rx_req->udc_priv = sps_params;

	d->tx_req = usb_ep_alloc_request(port->port_usb->in, GFP_ATOMIC);
	spin_unlock(&port->port_lock_dl);
	spin_unlock_irqrestore(&port->port_lock_ul, flags);
	if (!d->tx_req) {
		pr_err("%s: out of memory\n", __func__);
		return;
	}

	d->tx_req->context = port;
	d->tx_req->complete = gbam_endless_tx_complete;
	d->tx_req->length = 0;
	sps_params = (MSM_SPS_MODE | d->dst_pipe_idx |
		MSM_VENDOR_ID) & ~MSM_IS_FINITE_TRANSFER;
	d->tx_req->udc_priv = sps_params;

	/* queue in & out requests */
	gbam_start_endless_rx(port);
	gbam_start_endless_tx(port);

	if (d->trans == USB_GADGET_XPORT_BAM2BAM && port->port_num == 0) {
		/* Register for peer reset callback */
		usb_bam_register_peer_reset_cb(gbam_peer_reset_cb, port);

		ret = usb_bam_client_ready(true);
		if (ret) {
			pr_err("%s: usb_bam_client_ready failed: err:%d\n",
				__func__, ret);
			return;
		}
	}

	pr_debug("%s: done\n", __func__);
}
888
/*
 * Callback invoked by the USB BAM driver when the peer (A2/modem) side
 * resets its BAM.  Sequence: quiesce the USB endpoints (if enabled),
 * disable the HW BAM, reset it via usb_bam_a2_reset(), re-enable the HW
 * BAM, then restore the endpoints and re-arm the endless rx/tx transfers.
 *
 * Returns 0 on success, or the usb_ep_enable() error code if an endpoint
 * could not be re-enabled.
 *
 * NOTE(review): when usb_bam_a2_reset() fails the error is logged and the
 * code jumps straight to endpoint re-enable; if that succeeds the function
 * still returns 0, so the reset failure is not propagated — confirm the
 * caller does not depend on seeing it.
 */
static int gbam_peer_reset_cb(void *param)
{
	struct gbam_port *port = (struct gbam_port *)param;
	struct bam_ch_info *d;
	struct f_rmnet *dev;
	struct usb_gadget *gadget;
	int ret;
	bool reenable_eps = false;

	dev = port_to_rmnet(port->gr);
	d = &port->data_ch;

	gadget = dev->cdev->gadget;

	pr_debug("%s: reset by peer\n", __func__);

	/* Disable the relevant EPs if currently EPs are enabled */
	/* in->driver_data doubles as the "endpoints are enabled" flag */
	if (port->port_usb && port->port_usb->in &&
			port->port_usb->in->driver_data) {
		usb_ep_disable(port->port_usb->out);
		usb_ep_disable(port->port_usb->in);

		port->port_usb->in->driver_data = NULL;
		port->port_usb->out->driver_data = NULL;
		reenable_eps = true;
	}

	/* Disable BAM */
	msm_hw_bam_disable(1);

	/* Reset BAM */
	ret = usb_bam_a2_reset(0);
	if (ret) {
		pr_err("%s: BAM reset failed %d\n", __func__, ret);
		goto reenable_eps;
	}

	/* Enable BAM */
	msm_hw_bam_disable(0);

reenable_eps:
	/* Re-Enable the relevant EPs, if EPs were originally enabled */
	if (reenable_eps) {
		ret = usb_ep_enable(port->port_usb->in);
		if (ret) {
			pr_err("%s: usb_ep_enable failed eptype:IN ep:%p",
				__func__, port->port_usb->in);
			return ret;
		}
		port->port_usb->in->driver_data = port;

		ret = usb_ep_enable(port->port_usb->out);
		if (ret) {
			pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p",
				__func__, port->port_usb->out);
			/* roll back the IN enable marker on OUT failure */
			port->port_usb->in->driver_data = 0;
			return ret;
		}
		port->port_usb->out->driver_data = port;

		/* restart the endless HW-driven transfers on both pipes */
		gbam_start_endless_rx(port);
		gbam_start_endless_tx(port);
	}

	/* Unregister the peer reset callback */
	if (d->trans == USB_GADGET_XPORT_BAM2BAM && port->port_num == 0)
		usb_bam_register_peer_reset_cb(NULL, NULL);

	return 0;
}
959
Jack Phameffd4ae2011-08-03 16:49:36 -0700960/* BAM data channel ready, allow attempt to open */
961static int gbam_data_ch_probe(struct platform_device *pdev)
962{
963 struct gbam_port *port;
964 struct bam_ch_info *d;
965 int i;
966 unsigned long flags;
967
968 pr_debug("%s: name:%s\n", __func__, pdev->name);
969
970 for (i = 0; i < n_bam_ports; i++) {
971 port = bam_ports[i].port;
972 d = &port->data_ch;
973
974 if (!strncmp(bam_ch_names[i], pdev->name,
975 BAM_DMUX_CH_NAME_MAX_LEN)) {
976 set_bit(BAM_CH_READY, &d->flags);
977
978 /* if usb is online, try opening bam_ch */
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530979 spin_lock_irqsave(&port->port_lock_ul, flags);
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -0800980 spin_lock(&port->port_lock_dl);
Jack Phameffd4ae2011-08-03 16:49:36 -0700981 if (port->port_usb)
982 queue_work(gbam_wq, &port->connect_w);
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -0800983 spin_unlock(&port->port_lock_dl);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530984 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Jack Phameffd4ae2011-08-03 16:49:36 -0700985
986 break;
987 }
988 }
989
990 return 0;
991}
992
/* BAM data channel went inactive, so close it */
/*
 * Platform-driver remove callback for a BAM-DMUX data channel.  Finds the
 * matching port by channel name, flushes the USB endpoint FIFOs, frees the
 * queued buffers, closes the DMUX channel and clears the channel state bits.
 */
static int gbam_data_ch_remove(struct platform_device *pdev)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	struct usb_ep *ep_in = NULL;
	struct usb_ep *ep_out = NULL;
	unsigned long flags;
	int i;

	pr_debug("%s: name:%s\n", __func__, pdev->name);

	for (i = 0; i < n_bam_ports; i++) {
		if (!strncmp(bam_ch_names[i], pdev->name,
					BAM_DMUX_CH_NAME_MAX_LEN)) {
			port = bam_ports[i].port;
			d = &port->data_ch;

			/* Snapshot the endpoints under both locks; the FIFO
			 * flushes below are done with the locks dropped */
			spin_lock_irqsave(&port->port_lock_ul, flags);
			spin_lock(&port->port_lock_dl);
			if (port->port_usb) {
				ep_in = port->port_usb->in;
				ep_out = port->port_usb->out;
			}
			spin_unlock(&port->port_lock_dl);
			spin_unlock_irqrestore(&port->port_lock_ul, flags);

			if (ep_in)
				usb_ep_fifo_flush(ep_in);
			if (ep_out)
				usb_ep_fifo_flush(ep_out);

			gbam_free_buffers(port);

			msm_bam_dmux_close(d->id);

			/* bam dmux will free all pending skbs */
			d->pending_with_bam = 0;

			clear_bit(BAM_CH_READY, &d->flags);
			clear_bit(BAM_CH_OPENED, &d->flags);
		}
	}

	return 0;
}
1039
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001040static void gbam_port_free(int portno)
1041{
1042 struct gbam_port *port = bam_ports[portno].port;
Jack Phameffd4ae2011-08-03 16:49:36 -07001043 struct platform_driver *pdrv = &bam_ports[portno].pdrv;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001044
Jack Phameffd4ae2011-08-03 16:49:36 -07001045 if (port) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001046 kfree(port);
Jack Phameffd4ae2011-08-03 16:49:36 -07001047 platform_driver_unregister(pdrv);
1048 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001049}
1050
Ofir Cohena1c2a872011-12-14 10:26:34 +02001051static void gbam2bam_port_free(int portno)
1052{
1053 struct gbam_port *port = bam2bam_ports[portno];
1054
1055 kfree(port);
1056}
1057
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001058static int gbam_port_alloc(int portno)
1059{
1060 struct gbam_port *port;
1061 struct bam_ch_info *d;
Jack Phameffd4ae2011-08-03 16:49:36 -07001062 struct platform_driver *pdrv;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001063
1064 port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
1065 if (!port)
1066 return -ENOMEM;
1067
1068 port->port_num = portno;
1069
1070 /* port initialization */
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +05301071 spin_lock_init(&port->port_lock_ul);
1072 spin_lock_init(&port->port_lock_dl);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001073 INIT_WORK(&port->connect_w, gbam_connect_work);
Vamsi Krishna1ad076d2011-11-10 15:03:30 -08001074 INIT_WORK(&port->disconnect_w, gbam_disconnect_work);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001075
1076 /* data ch */
1077 d = &port->data_ch;
1078 d->port = port;
1079 INIT_LIST_HEAD(&d->tx_idle);
1080 INIT_LIST_HEAD(&d->rx_idle);
1081 INIT_WORK(&d->write_tobam_w, gbam_data_write_tobam);
Chiranjeevi Velempatie5105922012-01-19 12:25:26 +05301082 INIT_WORK(&d->write_tohost_w, gbam_write_data_tohost_w);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001083 skb_queue_head_init(&d->tx_skb_q);
1084 skb_queue_head_init(&d->rx_skb_q);
1085 d->id = bam_ch_ids[portno];
1086
1087 bam_ports[portno].port = port;
1088
Jack Phameffd4ae2011-08-03 16:49:36 -07001089 pdrv = &bam_ports[portno].pdrv;
1090 pdrv->probe = gbam_data_ch_probe;
1091 pdrv->remove = gbam_data_ch_remove;
1092 pdrv->driver.name = bam_ch_names[portno];
1093 pdrv->driver.owner = THIS_MODULE;
1094
1095 platform_driver_register(pdrv);
Ofir Cohena1c2a872011-12-14 10:26:34 +02001096 pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);
1097
1098 return 0;
1099}
1100
1101static int gbam2bam_port_alloc(int portno)
1102{
1103 struct gbam_port *port;
1104 struct bam_ch_info *d;
1105
1106 port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
1107 if (!port)
1108 return -ENOMEM;
1109
1110 port->port_num = portno;
1111
1112 /* port initialization */
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +05301113 spin_lock_init(&port->port_lock_ul);
1114 spin_lock_init(&port->port_lock_dl);
Ofir Cohena1c2a872011-12-14 10:26:34 +02001115
1116 INIT_WORK(&port->connect_w, gbam2bam_connect_work);
Ofir Cohenfdecb602012-11-16 15:50:01 +02001117 INIT_WORK(&port->disconnect_w, gbam2bam_disconnect_work);
Ofir Cohena1c2a872011-12-14 10:26:34 +02001118
1119 /* data ch */
1120 d = &port->data_ch;
1121 d->port = port;
1122 bam2bam_ports[portno] = port;
Jack Phameffd4ae2011-08-03 16:49:36 -07001123
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001124 pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);
1125
1126 return 0;
1127}
1128
1129#if defined(CONFIG_DEBUG_FS)
1130#define DEBUG_BUF_SIZE 1024
1131static ssize_t gbam_read_stats(struct file *file, char __user *ubuf,
1132 size_t count, loff_t *ppos)
1133{
1134 struct gbam_port *port;
1135 struct bam_ch_info *d;
1136 char *buf;
1137 unsigned long flags;
1138 int ret;
1139 int i;
1140 int temp = 0;
1141
1142 buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
1143 if (!buf)
1144 return -ENOMEM;
1145
1146 for (i = 0; i < n_bam_ports; i++) {
1147 port = bam_ports[i].port;
1148 if (!port)
1149 continue;
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +05301150 spin_lock_irqsave(&port->port_lock_ul, flags);
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -08001151 spin_lock(&port->port_lock_dl);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001152
1153 d = &port->data_ch;
1154
1155 temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
1156 "#PORT:%d port:%p data_ch:%p#\n"
1157 "dpkts_to_usbhost: %lu\n"
1158 "dpkts_to_modem: %lu\n"
1159 "dpkts_pwith_bam: %u\n"
1160 "to_usbhost_dcnt: %u\n"
1161 "tomodem__dcnt: %u\n"
1162 "tx_buf_len: %u\n"
Vamsi Krishna84579552011-11-09 15:33:22 -08001163 "rx_buf_len: %u\n"
Jack Phameffd4ae2011-08-03 16:49:36 -07001164 "data_ch_open: %d\n"
1165 "data_ch_ready: %d\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001166 i, port, &port->data_ch,
1167 d->to_host, d->to_modem,
1168 d->pending_with_bam,
1169 d->tohost_drp_cnt, d->tomodem_drp_cnt,
Vamsi Krishna84579552011-11-09 15:33:22 -08001170 d->tx_skb_q.qlen, d->rx_skb_q.qlen,
Jack Phameffd4ae2011-08-03 16:49:36 -07001171 test_bit(BAM_CH_OPENED, &d->flags),
1172 test_bit(BAM_CH_READY, &d->flags));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001173
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -08001174 spin_unlock(&port->port_lock_dl);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +05301175 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001176 }
1177
1178 ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);
1179
1180 kfree(buf);
1181
1182 return ret;
1183}
1184
1185static ssize_t gbam_reset_stats(struct file *file, const char __user *buf,
1186 size_t count, loff_t *ppos)
1187{
1188 struct gbam_port *port;
1189 struct bam_ch_info *d;
1190 int i;
1191 unsigned long flags;
1192
1193 for (i = 0; i < n_bam_ports; i++) {
1194 port = bam_ports[i].port;
1195 if (!port)
1196 continue;
1197
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +05301198 spin_lock_irqsave(&port->port_lock_ul, flags);
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -08001199 spin_lock(&port->port_lock_dl);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001200
1201 d = &port->data_ch;
1202
1203 d->to_host = 0;
1204 d->to_modem = 0;
1205 d->pending_with_bam = 0;
1206 d->tohost_drp_cnt = 0;
1207 d->tomodem_drp_cnt = 0;
1208
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -08001209 spin_unlock(&port->port_lock_dl);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +05301210 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001211 }
1212 return count;
1213}
1214
/* debugfs "status" file ops: read dumps per-port stats, write resets them */
const struct file_operations gbam_stats_ops = {
	.read = gbam_read_stats,
	.write = gbam_reset_stats,
};
1219
1220static void gbam_debugfs_init(void)
1221{
1222 struct dentry *dent;
1223 struct dentry *dfile;
1224
1225 dent = debugfs_create_dir("usb_rmnet", 0);
1226 if (IS_ERR(dent))
1227 return;
1228
1229 /* TODO: Implement cleanup function to remove created file */
1230 dfile = debugfs_create_file("status", 0444, dent, 0, &gbam_stats_ops);
1231 if (!dfile || IS_ERR(dfile))
1232 debugfs_remove(dent);
1233}
1234#else
1235static void gam_debugfs_init(void) { }
1236#endif
1237
/*
 * Tear down the data path when the rmnet function is disconnected.
 *
 * @gr:       gadget-side rmnet port (provides the in/out endpoints)
 * @port_num: index into bam_ports[] or bam2bam_ports[] depending on @trans
 * @trans:    transport type (BAM, BAM2BAM, or BAM2BAM_IPA)
 *
 * Clears port_usb under both path locks, disables the endpoints, and then
 * hands the transport-specific teardown to the port's disconnect work
 * (BAM / BAM2BAM_IPA) or signals usb_bam that the client is no longer
 * ready (plain BAM2BAM, port 0 only).
 */
void gbam_disconnect(struct grmnet *gr, u8 port_num, enum transport_type trans)
{
	struct gbam_port *port;
	unsigned long flags;
	struct bam_ch_info *d;

	pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);

	if (trans == USB_GADGET_XPORT_BAM &&
		port_num >= n_bam_ports) {
		pr_err("%s: invalid bam portno#%d\n",
			__func__, port_num);
		return;
	}

	if ((trans == USB_GADGET_XPORT_BAM2BAM ||
		trans == USB_GADGET_XPORT_BAM2BAM_IPA) &&
		port_num >= n_bam2bam_ports) {
		pr_err("%s: invalid bam2bam portno#%d\n",
			__func__, port_num);
		return;
	}

	if (!gr) {
		pr_err("%s: grmnet port is null\n", __func__);
		return;
	}
	if (trans == USB_GADGET_XPORT_BAM)
		port = bam_ports[port_num].port;
	else
		port = bam2bam_ports[port_num];

	d = &port->data_ch;
	port->gr = gr;

	/* BAM-DMUX transport owns skb queues that must be drained here */
	if (trans == USB_GADGET_XPORT_BAM)
		gbam_free_buffers(port);

	/* Clearing port_usb stops the rx/tx paths from queuing new I/O */
	spin_lock_irqsave(&port->port_lock_ul, flags);
	spin_lock(&port->port_lock_dl);
	port->port_usb = 0;
	n_tx_req_queued = 0;
	spin_unlock(&port->port_lock_dl);
	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	/* disable endpoints */
	usb_ep_disable(gr->out);
	usb_ep_disable(gr->in);

	gr->in->driver_data = NULL;
	gr->out->driver_data = NULL;

	if (trans == USB_GADGET_XPORT_BAM ||
		trans == USB_GADGET_XPORT_BAM2BAM_IPA)
		queue_work(gbam_wq, &port->disconnect_w);
	else if (trans == USB_GADGET_XPORT_BAM2BAM) {
		if (port_num == 0) {
			if (usb_bam_client_ready(false)) {
				pr_err("%s: usb_bam_client_ready failed\n",
					__func__);
			}
		}
	}
}
1302
/*
 * Bring up the data path when the rmnet function is configured.
 *
 * @gr:       gadget-side rmnet port (provides the in/out endpoints)
 * @port_num: index into bam_ports[] or bam2bam_ports[] depending on @trans
 * @trans:    transport type (BAM, BAM2BAM, or BAM2BAM_IPA)
 * @src_connection_idx / @dst_connection_idx: usb_bam connection indices
 *            used only by the BAM2BAM transports
 *
 * Enables both endpoints, publishes @gr as port_usb under both path locks,
 * records the transport parameters, and defers the actual channel/pipe
 * setup to the port's connect work item.
 *
 * Returns 0 on success or a negative errno (bad port number, null @gr, or
 * usb_ep_enable() failure).
 */
int gbam_connect(struct grmnet *gr, u8 port_num,
		enum transport_type trans, u8 src_connection_idx,
		u8 dst_connection_idx)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	int ret;
	unsigned long flags;

	pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);

	if (trans == USB_GADGET_XPORT_BAM && port_num >= n_bam_ports) {
		pr_err("%s: invalid portno#%d\n", __func__, port_num);
		return -ENODEV;
	}

	if ((trans == USB_GADGET_XPORT_BAM2BAM ||
		trans == USB_GADGET_XPORT_BAM2BAM_IPA)
		&& port_num >= n_bam2bam_ports) {
		pr_err("%s: invalid portno#%d\n", __func__, port_num);
		return -ENODEV;
	}

	if (!gr) {
		pr_err("%s: grmnet port is null\n", __func__);
		return -ENODEV;
	}

	if (trans == USB_GADGET_XPORT_BAM)
		port = bam_ports[port_num].port;
	else
		port = bam2bam_ports[port_num];

	d = &port->data_ch;

	/* Endpoints must be live before port_usb is published below, since
	 * publishing it lets the rx/tx paths start queuing requests */
	ret = usb_ep_enable(gr->in);
	if (ret) {
		pr_err("%s: usb_ep_enable failed eptype:IN ep:%p",
			__func__, gr->in);
		return ret;
	}
	gr->in->driver_data = port;

	ret = usb_ep_enable(gr->out);
	if (ret) {
		pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p",
			__func__, gr->out);
		/* roll back the IN enable marker on OUT failure */
		gr->in->driver_data = 0;
		return ret;
	}
	gr->out->driver_data = port;

	spin_lock_irqsave(&port->port_lock_ul, flags);
	spin_lock(&port->port_lock_dl);
	port->port_usb = gr;

	/* stats counters only apply to the BAM-DMUX (sw path) transport */
	if (trans == USB_GADGET_XPORT_BAM) {
		d->to_host = 0;
		d->to_modem = 0;
		d->pending_with_bam = 0;
		d->tohost_drp_cnt = 0;
		d->tomodem_drp_cnt = 0;
	}

	spin_unlock(&port->port_lock_dl);
	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	/* stash the usb_bam connection parameters for the connect work */
	if (trans == USB_GADGET_XPORT_BAM2BAM) {
		port->gr = gr;
		d->src_connection_idx = src_connection_idx;
		d->dst_connection_idx = dst_connection_idx;
	} else if (trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
		port->gr = gr;
		d->ipa_params.src_pipe = &(d->src_pipe_idx);
		d->ipa_params.dst_pipe = &(d->dst_pipe_idx);
		d->ipa_params.src_idx = src_connection_idx;
		d->ipa_params.dst_idx = dst_connection_idx;
	}

	d->trans = trans;
	queue_work(gbam_wq, &port->connect_w);

	return 0;
}
1387
Ofir Cohena1c2a872011-12-14 10:26:34 +02001388int gbam_setup(unsigned int no_bam_port, unsigned int no_bam2bam_port)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001389{
1390 int i;
1391 int ret;
1392
Ofir Cohena1c2a872011-12-14 10:26:34 +02001393 pr_debug("%s: requested BAM ports:%d and BAM2BAM ports:%d\n",
1394 __func__, no_bam_port, no_bam2bam_port);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001395
Ofir Cohena1c2a872011-12-14 10:26:34 +02001396 if ((!no_bam_port && !no_bam2bam_port) || no_bam_port > BAM_N_PORTS
1397 || no_bam2bam_port > BAM2BAM_N_PORTS) {
1398 pr_err("%s: Invalid num of ports count:%d,%d\n",
1399 __func__, no_bam_port, no_bam2bam_port);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001400 return -EINVAL;
1401 }
1402
1403 gbam_wq = alloc_workqueue("k_gbam", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
1404 if (!gbam_wq) {
1405 pr_err("%s: Unable to create workqueue gbam_wq\n",
1406 __func__);
1407 return -ENOMEM;
1408 }
1409
Ofir Cohena1c2a872011-12-14 10:26:34 +02001410 for (i = 0; i < no_bam_port; i++) {
Manu Gautamd59b5d32011-09-09 14:47:08 +05301411 n_bam_ports++;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001412 ret = gbam_port_alloc(i);
1413 if (ret) {
Manu Gautamd59b5d32011-09-09 14:47:08 +05301414 n_bam_ports--;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001415 pr_err("%s: Unable to alloc port:%d\n", __func__, i);
1416 goto free_bam_ports;
1417 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001418 }
1419
Ofir Cohena1c2a872011-12-14 10:26:34 +02001420 for (i = 0; i < no_bam2bam_port; i++) {
1421 n_bam2bam_ports++;
1422 ret = gbam2bam_port_alloc(i);
1423 if (ret) {
1424 n_bam2bam_ports--;
1425 pr_err("%s: Unable to alloc port:%d\n", __func__, i);
1426 goto free_bam_ports;
1427 }
1428 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001429 gbam_debugfs_init();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001430 return 0;
Ofir Cohena1c2a872011-12-14 10:26:34 +02001431
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001432free_bam_ports:
1433 for (i = 0; i < n_bam_ports; i++)
1434 gbam_port_free(i);
Ofir Cohena1c2a872011-12-14 10:26:34 +02001435 for (i = 0; i < n_bam2bam_ports; i++)
1436 gbam2bam_port_free(i);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001437 destroy_workqueue(gbam_wq);
1438
1439 return ret;
1440}
Amit Blaye5bb35e2012-05-08 20:38:20 +03001441
1442static int gbam_wake_cb(void *param)
1443{
1444 struct gbam_port *port = (struct gbam_port *)param;
1445 struct bam_ch_info *d;
1446 struct f_rmnet *dev;
1447
1448 dev = port_to_rmnet(port->gr);
1449 d = &port->data_ch;
1450
1451 pr_debug("%s: woken up by peer\n", __func__);
1452
1453 return usb_gadget_wakeup(dev->cdev->gadget);
1454}
1455
1456void gbam_suspend(struct grmnet *gr, u8 port_num, enum transport_type trans)
1457{
1458 struct gbam_port *port;
1459 struct bam_ch_info *d;
1460
Ofir Cohenfdecb602012-11-16 15:50:01 +02001461 if (trans != USB_GADGET_XPORT_BAM2BAM &&
1462 trans != USB_GADGET_XPORT_BAM2BAM_IPA)
Amit Blaye5bb35e2012-05-08 20:38:20 +03001463 return;
1464
1465 port = bam2bam_ports[port_num];
1466 d = &port->data_ch;
1467
1468 pr_debug("%s: suspended port %d\n", __func__, port_num);
1469
Shimrit Malichidbf43d72013-03-16 03:32:27 +02001470 usb_bam_register_wake_cb(d->dst_connection_idx, gbam_wake_cb, port);
Lena Salman05b544f2013-05-13 15:49:10 +03001471 if (trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
1472 usb_bam_register_start_stop_cbs(gbam_start, gbam_stop, port);
1473 usb_bam_suspend(&d->ipa_params);
1474 }
Amit Blaye5bb35e2012-05-08 20:38:20 +03001475}
1476
1477void gbam_resume(struct grmnet *gr, u8 port_num, enum transport_type trans)
1478{
1479 struct gbam_port *port;
1480 struct bam_ch_info *d;
1481
Ofir Cohenfdecb602012-11-16 15:50:01 +02001482 if (trans != USB_GADGET_XPORT_BAM2BAM &&
1483 trans != USB_GADGET_XPORT_BAM2BAM_IPA)
Amit Blaye5bb35e2012-05-08 20:38:20 +03001484 return;
1485
1486 port = bam2bam_ports[port_num];
1487 d = &port->data_ch;
1488
1489 pr_debug("%s: resumed port %d\n", __func__, port_num);
1490
Shimrit Malichidbf43d72013-03-16 03:32:27 +02001491 usb_bam_register_wake_cb(d->dst_connection_idx, NULL, NULL);
Lena Salman05b544f2013-05-13 15:49:10 +03001492 if (trans == USB_GADGET_XPORT_BAM2BAM_IPA)
1493 usb_bam_resume(&d->ipa_params);
Amit Blaye5bb35e2012-05-08 20:38:20 +03001494}