/* Copyright (c) 2011-2013, Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/termios.h>
#include <mach/msm_smd.h>
#include <linux/netdevice.h>
#include <mach/bam_dmux.h>
#include <linux/debugfs.h>
#include <linux/bitops.h>

#include <mach/usb_gadget_xport.h>
#include <linux/usb/msm_hsusb.h>
#include <mach/usb_bam.h>

#include "u_rmnet.h"

#define BAM_N_PORTS	1
#define BAM2BAM_N_PORTS	3

static struct workqueue_struct *gbam_wq;
static int n_bam_ports;
static int n_bam2bam_ports;
static unsigned n_tx_req_queued;
static unsigned bam_ch_ids[] = { 8 };

static const char *bam_ch_names[] = { "bam_dmux_ch_8" };

#define BAM_PENDING_LIMIT			220
#define BAM_MUX_TX_PKT_DROP_THRESHOLD		1000
#define BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD		500
#define BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD		300
#define BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT	1

#define BAM_MUX_HDR				8

#define BAM_MUX_RX_Q_SIZE			16
#define BAM_MUX_TX_Q_SIZE			200
#define BAM_MUX_RX_REQ_SIZE			2048	/* Must be 1KB aligned */

#define DL_INTR_THRESHOLD			20

unsigned int bam_mux_tx_pkt_drop_thld = BAM_MUX_TX_PKT_DROP_THRESHOLD;
module_param(bam_mux_tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_fctrl_en_thld = BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD;
module_param(bam_mux_rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_fctrl_support = BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT;
module_param(bam_mux_rx_fctrl_support, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_fctrl_dis_thld = BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD;
module_param(bam_mux_rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_tx_q_size = BAM_MUX_TX_Q_SIZE;
module_param(bam_mux_tx_q_size, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_q_size = BAM_MUX_RX_Q_SIZE;
module_param(bam_mux_rx_q_size, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_req_size = BAM_MUX_RX_REQ_SIZE;
module_param(bam_mux_rx_req_size, uint, S_IRUGO | S_IWUSR);

unsigned int dl_intr_threshold = DL_INTR_THRESHOLD;
module_param(dl_intr_threshold, uint, S_IRUGO | S_IWUSR);
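
/*
 * Note: the knobs above are exported as writable module parameters, so on a
 * running target they can typically be tuned through sysfs, for example
 * (the module path is illustrative and depends on how the gadget is built):
 *	echo 500 > /sys/module/<gadget_module>/parameters/bam_mux_tx_pkt_drop_thld
 */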

#define BAM_CH_OPENED	BIT(0)
#define BAM_CH_READY	BIT(1)

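/*
 * Per-channel state: idle USB request pools and skb queues used by the
 * SW (BAM-DMUX) path, pipe/connection handles used by the HW (BAM2BAM/IPA)
 * path, and the counters reported through debugfs.
 */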
struct bam_ch_info {
	unsigned long		flags;
	unsigned		id;

	struct list_head	tx_idle;
	struct sk_buff_head	tx_skb_q;

	struct list_head	rx_idle;
	struct sk_buff_head	rx_skb_q;

	struct gbam_port	*port;
	struct work_struct	write_tobam_w;
	struct work_struct	write_tohost_w;

	struct usb_request	*rx_req;
	struct usb_request	*tx_req;

	u32			src_pipe_idx;
	u32			dst_pipe_idx;
	u8			src_connection_idx;
	u8			dst_connection_idx;
	enum transport_type	trans;
	struct usb_bam_connect_ipa_params	ipa_params;

	/* stats */
	unsigned int		pending_with_bam;
	unsigned int		tohost_drp_cnt;
	unsigned int		tomodem_drp_cnt;
	unsigned int		tx_len;
	unsigned int		rx_len;
	unsigned long		to_modem;
	unsigned long		to_host;
};

struct gbam_port {
	unsigned		port_num;
	spinlock_t		port_lock_ul;
	spinlock_t		port_lock_dl;

	struct grmnet		*port_usb;
	struct grmnet		*gr;

	struct bam_ch_info	data_ch;

	struct work_struct	connect_w;
	struct work_struct	disconnect_w;
	struct work_struct	suspend_w;
	struct work_struct	resume_w;
};

static struct bam_portmaster {
	struct gbam_port *port;
	struct platform_driver pdrv;
} bam_ports[BAM_N_PORTS];

struct gbam_port *bam2bam_ports[BAM2BAM_N_PORTS];
static void gbam_start_rx(struct gbam_port *port);
static void gbam_start_endless_rx(struct gbam_port *port);
static void gbam_start_endless_tx(struct gbam_port *port);
static int gbam_peer_reset_cb(void *param);

/*---------------misc functions---------------- */
static void gbam_free_requests(struct usb_ep *ep, struct list_head *head)
{
	struct usb_request	*req;

	while (!list_empty(head)) {
		req = list_entry(head->next, struct usb_request, list);
		list_del(&req->list);
		usb_ep_free_request(ep, req);
	}
}

static int gbam_alloc_requests(struct usb_ep *ep, struct list_head *head,
		int num,
		void (*cb)(struct usb_ep *ep, struct usb_request *),
		gfp_t flags)
{
	int i;
	struct usb_request *req;

	pr_debug("%s: ep:%p head:%p num:%d cb:%p", __func__,
			ep, head, num, cb);

	for (i = 0; i < num; i++) {
		req = usb_ep_alloc_request(ep, flags);
		if (!req) {
			pr_debug("%s: req allocated:%d\n", __func__, i);
			return list_empty(head) ? -ENOMEM : 0;
		}
		req->complete = cb;
		list_add(&req->list, head);
	}

	return 0;
}
/*--------------------------------------------- */

/*------------data_path----------------------------*/
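/*
 * Downlink (modem -> USB host) for the BAM-DMUX path: skbs handed over by
 * bam_dmux are queued on tx_skb_q and drained here onto the IN endpoint.
 * Completion interrupts are requested only every dl_intr_threshold requests
 * to reduce interrupt overhead.
 */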
static void gbam_write_data_tohost(struct gbam_port *port)
{
	unsigned long		flags;
	struct bam_ch_info	*d = &port->data_ch;
	struct sk_buff		*skb;
	int			ret;
	struct usb_request	*req;
	struct usb_ep		*ep;

	spin_lock_irqsave(&port->port_lock_dl, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock_dl, flags);
		return;
	}

	ep = port->port_usb->in;

	while (!list_empty(&d->tx_idle)) {
		skb = __skb_dequeue(&d->tx_skb_q);
		if (!skb) {
			spin_unlock_irqrestore(&port->port_lock_dl, flags);
			return;
		}
		req = list_first_entry(&d->tx_idle,
				struct usb_request,
				list);
		req->context = skb;
		req->buf = skb->data;
		req->length = skb->len;
		n_tx_req_queued++;
		if (n_tx_req_queued == dl_intr_threshold) {
			req->no_interrupt = 0;
			n_tx_req_queued = 0;
		} else {
			req->no_interrupt = 1;
		}

		/* Send ZLP in case packet length is multiple of maxpacksize */
		req->zero = 1;

		list_del(&req->list);

		spin_unlock(&port->port_lock_dl);
		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
		spin_lock(&port->port_lock_dl);
		if (ret) {
			pr_err("%s: usb epIn failed with %d\n", __func__, ret);
			list_add(&req->list, &d->tx_idle);
			dev_kfree_skb_any(skb);
			break;
		}
		d->to_host++;
	}
	spin_unlock_irqrestore(&port->port_lock_dl, flags);
}

static void gbam_write_data_tohost_w(struct work_struct *w)
{
	struct bam_ch_info *d;
	struct gbam_port *port;

	d = container_of(w, struct bam_ch_info, write_tohost_w);
	port = d->port;

	gbam_write_data_tohost(port);
}

void gbam_data_recv_cb(void *p, struct sk_buff *skb)
{
	struct gbam_port	*port = p;
	struct bam_ch_info	*d = &port->data_ch;
	unsigned long		flags;

	if (!skb)
		return;

	pr_debug("%s: p:%p#%d d:%p skb_len:%d\n", __func__,
			port, port->port_num, d, skb->len);

	spin_lock_irqsave(&port->port_lock_dl, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock_dl, flags);
		dev_kfree_skb_any(skb);
		return;
	}

	if (d->tx_skb_q.qlen > bam_mux_tx_pkt_drop_thld) {
		d->tohost_drp_cnt++;
		if (printk_ratelimit())
			pr_err("%s: tx pkt dropped: tx_drop_cnt:%u\n",
					__func__, d->tohost_drp_cnt);
		spin_unlock_irqrestore(&port->port_lock_dl, flags);
		dev_kfree_skb_any(skb);
		return;
	}

	__skb_queue_tail(&d->tx_skb_q, skb);
	spin_unlock_irqrestore(&port->port_lock_dl, flags);

	gbam_write_data_tohost(port);
}

void gbam_data_write_done(void *p, struct sk_buff *skb)
{
	struct gbam_port	*port = p;
	struct bam_ch_info	*d = &port->data_ch;
	unsigned long		flags;

	if (!skb)
		return;

	dev_kfree_skb_any(skb);

	spin_lock_irqsave(&port->port_lock_ul, flags);

	d->pending_with_bam--;

	pr_debug("%s: port:%p d:%p tom:%lu pbam:%u, pno:%d\n", __func__,
			port, d, d->to_modem,
			d->pending_with_bam, port->port_num);

	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	queue_work(gbam_wq, &d->write_tobam_w);
}

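/*
 * Uplink (USB host -> modem) for the BAM-DMUX path: skbs collected from the
 * OUT endpoint are written into bam_dmux, keeping at most BAM_PENDING_LIMIT
 * writes outstanding; rx is restarted once the backlog drops below
 * BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD.
 */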
static void gbam_data_write_tobam(struct work_struct *w)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	struct sk_buff		*skb;
	unsigned long		flags;
	int			ret;
	int			qlen;

	d = container_of(w, struct bam_ch_info, write_tobam_w);
	port = d->port;

	spin_lock_irqsave(&port->port_lock_ul, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}

	while (d->pending_with_bam < BAM_PENDING_LIMIT) {
		skb = __skb_dequeue(&d->rx_skb_q);
		if (!skb)
			break;

		d->pending_with_bam++;
		d->to_modem++;

		pr_debug("%s: port:%p d:%p tom:%lu pbam:%u pno:%d\n", __func__,
				port, d, d->to_modem, d->pending_with_bam,
				port->port_num);

		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		ret = msm_bam_dmux_write(d->id, skb);
		spin_lock_irqsave(&port->port_lock_ul, flags);
		if (ret) {
			pr_debug("%s: write error:%d\n", __func__, ret);
			d->pending_with_bam--;
			d->to_modem--;
			d->tomodem_drp_cnt++;
			dev_kfree_skb_any(skb);
			break;
		}
	}

	qlen = d->rx_skb_q.qlen;

	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	if (qlen < BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD)
		gbam_start_rx(port);
}
/*-------------------------------------------------------------*/

static void gbam_epin_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gbam_port	*port = ep->driver_data;
	struct bam_ch_info	*d;
	struct sk_buff		*skb = req->context;
	int			status = req->status;

	switch (status) {
	case 0:
		/* successful completion */
		break;
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		dev_kfree_skb_any(skb);
		usb_ep_free_request(ep, req);
		return;
	default:
		pr_err("%s: data tx ep error %d\n",
				__func__, status);
		break;
	}

	dev_kfree_skb_any(skb);

	if (!port)
		return;

	spin_lock(&port->port_lock_dl);
	d = &port->data_ch;
	list_add_tail(&req->list, &d->tx_idle);
	spin_unlock(&port->port_lock_dl);

	queue_work(gbam_wq, &d->write_tohost_w);
}

static void
gbam_epout_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gbam_port	*port = ep->driver_data;
	struct bam_ch_info	*d = &port->data_ch;
	struct sk_buff		*skb = req->context;
	int			status = req->status;
	int			queue = 0;

	switch (status) {
	case 0:
		skb_put(skb, req->actual);
		queue = 1;
		break;
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* cable disconnection */
		dev_kfree_skb_any(skb);
		req->buf = 0;
		usb_ep_free_request(ep, req);
		return;
	default:
		if (printk_ratelimit())
			pr_err("%s: %s response error %d, %d/%d\n",
				__func__, ep->name, status,
				req->actual, req->length);
		dev_kfree_skb_any(skb);
		break;
	}

	spin_lock(&port->port_lock_ul);
	if (queue) {
		__skb_queue_tail(&d->rx_skb_q, skb);
		queue_work(gbam_wq, &d->write_tobam_w);
	}

	/* TODO: Handle flow control gracefully by having
	 * a callback mechanism from the bam driver
	 */
	if (bam_mux_rx_fctrl_support &&
		d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld) {

		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock_ul);
		return;
	}
	spin_unlock(&port->port_lock_ul);

	skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
	if (!skb) {
		spin_lock(&port->port_lock_ul);
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock_ul);
		return;
	}
	skb_reserve(skb, BAM_MUX_HDR);

	req->buf = skb->data;
	req->length = bam_mux_rx_req_size;
	req->context = skb;

	status = usb_ep_queue(ep, req, GFP_ATOMIC);
	if (status) {
		dev_kfree_skb_any(skb);

		if (printk_ratelimit())
			pr_err("%s: data rx enqueue err %d\n",
					__func__, status);

		spin_lock(&port->port_lock_ul);
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock_ul);
	}
}

static void gbam_endless_rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	int status = req->status;

	pr_debug("%s status: %d\n", __func__, status);
}

static void gbam_endless_tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	int status = req->status;

	pr_debug("%s status: %d\n", __func__, status);
}

static void gbam_start_rx(struct gbam_port *port)
{
	struct usb_request	*req;
	struct bam_ch_info	*d;
	struct usb_ep		*ep;
	unsigned long		flags;
	int			ret;
	struct sk_buff		*skb;

	spin_lock_irqsave(&port->port_lock_ul, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}

	d = &port->data_ch;
	ep = port->port_usb->out;

	while (port->port_usb && !list_empty(&d->rx_idle)) {

		if (bam_mux_rx_fctrl_support &&
			d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld)
			break;

		req = list_first_entry(&d->rx_idle, struct usb_request, list);

		skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
		if (!skb)
			break;
		skb_reserve(skb, BAM_MUX_HDR);

		list_del(&req->list);
		req->buf = skb->data;
		req->length = bam_mux_rx_req_size;
		req->context = skb;

		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
		spin_lock_irqsave(&port->port_lock_ul, flags);
		if (ret) {
			dev_kfree_skb_any(skb);

			if (printk_ratelimit())
				pr_err("%s: rx queue failed %d\n",
						__func__, ret);

			if (port->port_usb)
				list_add(&req->list, &d->rx_idle);
			else
				usb_ep_free_request(ep, req);
			break;
		}
	}
	spin_unlock_irqrestore(&port->port_lock_ul, flags);
}

static void gbam_start_endless_rx(struct gbam_port *port)
{
	struct bam_ch_info *d = &port->data_ch;
	int status;

	spin_lock(&port->port_lock_ul);
	if (!port->port_usb) {
		spin_unlock(&port->port_lock_ul);
		pr_err("%s: port->port_usb is NULL", __func__);
		return;
	}

	pr_debug("%s: enqueue\n", __func__);
	status = usb_ep_queue(port->port_usb->out, d->rx_req, GFP_ATOMIC);
	if (status)
		pr_err("%s: error enqueuing transfer, %d\n", __func__, status);
	spin_unlock(&port->port_lock_ul);
}

static void gbam_start_endless_tx(struct gbam_port *port)
{
	struct bam_ch_info *d = &port->data_ch;
	int status;

	spin_lock(&port->port_lock_dl);
	if (!port->port_usb) {
		spin_unlock(&port->port_lock_dl);
		pr_err("%s: port->port_usb is NULL", __func__);
		return;
	}

	pr_debug("%s: enqueue\n", __func__);
	status = usb_ep_queue(port->port_usb->in, d->tx_req, GFP_ATOMIC);
	if (status)
		pr_err("%s: error enqueuing transfer, %d\n", __func__, status);
	spin_unlock(&port->port_lock_dl);
}

static void gbam_stop_endless_rx(struct gbam_port *port)
{
	struct bam_ch_info *d = &port->data_ch;
	int status;

	spin_lock(&port->port_lock_ul);
	if (!port->port_usb) {
		spin_unlock(&port->port_lock_ul);
		pr_err("%s: port->port_usb is NULL", __func__);
		return;
	}
	pr_debug("%s: dequeue\n", __func__);

	status = usb_ep_dequeue(port->port_usb->out, d->rx_req);
	if (status)
		pr_err("%s: error dequeuing transfer, %d\n", __func__, status);
	spin_unlock(&port->port_lock_ul);
}

static void gbam_stop_endless_tx(struct gbam_port *port)
{
	struct bam_ch_info *d = &port->data_ch;
	int status;

	spin_lock(&port->port_lock_dl);
	if (!port->port_usb) {
		spin_unlock(&port->port_lock_dl);
		pr_err("%s: port->port_usb is NULL", __func__);
		return;
	}

	pr_debug("%s: dequeue\n", __func__);
	status = usb_ep_dequeue(port->port_usb->in, d->tx_req);
	if (status)
		pr_err("%s: error dequeuing transfer, %d\n", __func__, status);
	spin_unlock(&port->port_lock_dl);
}

static void gbam_start(void *param, enum usb_bam_pipe_dir dir)
{
	struct gbam_port *port = param;

	if (dir == USB_TO_PEER_PERIPHERAL)
		gbam_start_endless_rx(port);
	else
		gbam_start_endless_tx(port);
}

static void gbam_stop(void *param, enum usb_bam_pipe_dir dir)
{
	struct gbam_port *port = param;

	if (dir == USB_TO_PEER_PERIPHERAL)
		gbam_stop_endless_rx(port);
	else
		gbam_stop_endless_tx(port);
}

static void gbam_start_io(struct gbam_port *port)
{
	unsigned long		flags;
	struct usb_ep		*ep;
	int			ret;
	struct bam_ch_info	*d;

	pr_debug("%s: port:%p\n", __func__, port);

	spin_lock_irqsave(&port->port_lock_ul, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}

	d = &port->data_ch;
	ep = port->port_usb->out;
	ret = gbam_alloc_requests(ep, &d->rx_idle, bam_mux_rx_q_size,
			gbam_epout_complete, GFP_ATOMIC);
	if (ret) {
		pr_err("%s: rx req allocation failed\n", __func__);
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}

	spin_unlock_irqrestore(&port->port_lock_ul, flags);
	spin_lock_irqsave(&port->port_lock_dl, flags);
	if (!port->port_usb) {
		gbam_free_requests(ep, &d->rx_idle);
		spin_unlock_irqrestore(&port->port_lock_dl, flags);
		return;
	}
	ep = port->port_usb->in;
	ret = gbam_alloc_requests(ep, &d->tx_idle, bam_mux_tx_q_size,
			gbam_epin_complete, GFP_ATOMIC);
	if (ret) {
		pr_err("%s: tx req allocation failed\n", __func__);
		gbam_free_requests(ep, &d->rx_idle);
		spin_unlock_irqrestore(&port->port_lock_dl, flags);
		return;
	}

	spin_unlock_irqrestore(&port->port_lock_dl, flags);

	/* queue out requests */
	gbam_start_rx(port);
}

static void gbam_notify(void *p, int event, unsigned long data)
{
	switch (event) {
	case BAM_DMUX_RECEIVE:
		gbam_data_recv_cb(p, (struct sk_buff *)(data));
		break;
	case BAM_DMUX_WRITE_DONE:
		gbam_data_write_done(p, (struct sk_buff *)(data));
		break;
	}
}

static void gbam_free_buffers(struct gbam_port *port)
{
	struct sk_buff		*skb;
	unsigned long		flags;
	struct bam_ch_info	*d;

	spin_lock_irqsave(&port->port_lock_ul, flags);
	spin_lock(&port->port_lock_dl);

	if (!port || !port->port_usb)
		goto free_buf_out;

	d = &port->data_ch;

	gbam_free_requests(port->port_usb->in, &d->tx_idle);
	gbam_free_requests(port->port_usb->out, &d->rx_idle);

	while ((skb = __skb_dequeue(&d->tx_skb_q)))
		dev_kfree_skb_any(skb);

	while ((skb = __skb_dequeue(&d->rx_skb_q)))
		dev_kfree_skb_any(skb);

free_buf_out:
	spin_unlock(&port->port_lock_dl);
	spin_unlock_irqrestore(&port->port_lock_ul, flags);
}

static void gbam_disconnect_work(struct work_struct *w)
{
	struct gbam_port *port =
		container_of(w, struct gbam_port, disconnect_w);
	struct bam_ch_info *d = &port->data_ch;

	if (!test_bit(BAM_CH_OPENED, &d->flags))
		return;

	msm_bam_dmux_close(d->id);
	clear_bit(BAM_CH_OPENED, &d->flags);
}

static void gbam2bam_disconnect_work(struct work_struct *w)
{
	struct gbam_port *port =
			container_of(w, struct gbam_port, disconnect_w);
	struct bam_ch_info *d = &port->data_ch;
	int ret;

	if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
		ret = usb_bam_disconnect_ipa(&d->ipa_params);
		if (ret)
			pr_err("%s: usb_bam_disconnect_ipa failed: err:%d\n",
				__func__, ret);
		teth_bridge_disconnect();
	}
}

static void gbam_connect_work(struct work_struct *w)
{
	struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
	struct bam_ch_info *d = &port->data_ch;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&port->port_lock_ul, flags);
	spin_lock(&port->port_lock_dl);
	if (!port->port_usb) {
		spin_unlock(&port->port_lock_dl);
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}
	spin_unlock(&port->port_lock_dl);
	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	if (!test_bit(BAM_CH_READY, &d->flags))
		return;

	ret = msm_bam_dmux_open(d->id, port, gbam_notify);
	if (ret) {
		pr_err("%s: unable open bam ch:%d err:%d\n",
				__func__, d->id, ret);
		return;
	}
	set_bit(BAM_CH_OPENED, &d->flags);

	gbam_start_io(port);

	pr_debug("%s: done\n", __func__);
}

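/*
 * HW-accelerated path setup. For BAM2BAM the two USB BAM pipes are connected
 * directly; for BAM2BAM_IPA both pipes are routed through the IPA and the
 * tethering bridge is initialized and connected. In both cases "endless"
 * (zero-length, non-finite) requests are then queued so the controller keeps
 * servicing the pipes without per-packet software involvement.
 */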
static void gbam2bam_connect_work(struct work_struct *w)
{
	struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
	struct teth_bridge_connect_params connect_params;
	struct bam_ch_info *d = &port->data_ch;
	u32 sps_params;
	ipa_notify_cb usb_notify_cb;
	void *priv;
	int ret;
	unsigned long flags;

	if (d->trans == USB_GADGET_XPORT_BAM2BAM) {
		usb_bam_reset_complete();
		ret = usb_bam_connect(d->src_connection_idx, &d->src_pipe_idx);
		if (ret) {
			pr_err("%s: usb_bam_connect (src) failed: err:%d\n",
				__func__, ret);
			return;
		}
		ret = usb_bam_connect(d->dst_connection_idx, &d->dst_pipe_idx);
		if (ret) {
			pr_err("%s: usb_bam_connect (dst) failed: err:%d\n",
				__func__, ret);
			return;
		}
	} else if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
		ret = teth_bridge_init(&usb_notify_cb, &priv);
		if (ret) {
			pr_err("%s:teth_bridge_init() failed\n", __func__);
			return;
		}
		d->ipa_params.notify = usb_notify_cb;
		d->ipa_params.priv = priv;
		d->ipa_params.ipa_ep_cfg.mode.mode = IPA_BASIC;

		d->ipa_params.client = IPA_CLIENT_USB_PROD;
		d->ipa_params.dir = USB_TO_PEER_PERIPHERAL;
		ret = usb_bam_connect_ipa(&d->ipa_params);
		if (ret) {
			pr_err("%s: usb_bam_connect_ipa failed: err:%d\n",
				__func__, ret);
			return;
		}

		d->ipa_params.client = IPA_CLIENT_USB_CONS;
		d->ipa_params.dir = PEER_PERIPHERAL_TO_USB;
		ret = usb_bam_connect_ipa(&d->ipa_params);
		if (ret) {
			pr_err("%s: usb_bam_connect_ipa failed: err:%d\n",
				__func__, ret);
			return;
		}

		connect_params.ipa_usb_pipe_hdl = d->ipa_params.prod_clnt_hdl;
		connect_params.usb_ipa_pipe_hdl = d->ipa_params.cons_clnt_hdl;
		connect_params.tethering_mode = TETH_TETHERING_MODE_RMNET;
		ret = teth_bridge_connect(&connect_params);
		if (ret) {
			pr_err("%s:teth_bridge_connect() failed\n", __func__);
			return;
		}
	}

	spin_lock_irqsave(&port->port_lock_ul, flags);
	spin_lock(&port->port_lock_dl);
	if (!port->port_usb) {
		pr_debug("%s: usb cable is disconnected, exiting\n", __func__);
		spin_unlock(&port->port_lock_dl);
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}
	d->rx_req = usb_ep_alloc_request(port->port_usb->out, GFP_ATOMIC);
	if (!d->rx_req) {
		spin_unlock(&port->port_lock_dl);
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		pr_err("%s: out of memory\n", __func__);
		return;
	}

	d->rx_req->context = port;
	d->rx_req->complete = gbam_endless_rx_complete;
	d->rx_req->length = 0;
	d->rx_req->no_interrupt = 1;
	sps_params = (MSM_SPS_MODE | d->src_pipe_idx |
				MSM_VENDOR_ID) & ~MSM_IS_FINITE_TRANSFER;
	d->rx_req->udc_priv = sps_params;

	d->tx_req = usb_ep_alloc_request(port->port_usb->in, GFP_ATOMIC);
	spin_unlock(&port->port_lock_dl);
	spin_unlock_irqrestore(&port->port_lock_ul, flags);
	if (!d->tx_req) {
		pr_err("%s: out of memory\n", __func__);
		return;
	}

	d->tx_req->context = port;
	d->tx_req->complete = gbam_endless_tx_complete;
	d->tx_req->length = 0;
	d->tx_req->no_interrupt = 1;
	sps_params = (MSM_SPS_MODE | d->dst_pipe_idx |
				MSM_VENDOR_ID) & ~MSM_IS_FINITE_TRANSFER;
	d->tx_req->udc_priv = sps_params;

	/* queue in & out requests */
	gbam_start_endless_rx(port);
	gbam_start_endless_tx(port);

	if (d->trans == USB_GADGET_XPORT_BAM2BAM && port->port_num == 0) {
		/* Register for peer reset callback */
		usb_bam_register_peer_reset_cb(gbam_peer_reset_cb, port);

		ret = usb_bam_client_ready(true);
		if (ret) {
			pr_err("%s: usb_bam_client_ready failed: err:%d\n",
				__func__, ret);
			return;
		}
	}

	pr_debug("%s: done\n", __func__);
}

static int gbam_wake_cb(void *param)
{
	struct gbam_port	*port = (struct gbam_port *)param;
	struct bam_ch_info	*d;
	struct f_rmnet		*dev;

	dev = port_to_rmnet(port->gr);
	d = &port->data_ch;

	pr_debug("%s: woken up by peer\n", __func__);

	return usb_gadget_wakeup(dev->cdev->gadget);
}

static void gbam2bam_suspend_work(struct work_struct *w)
{
	struct gbam_port *port = container_of(w, struct gbam_port, suspend_w);
	struct bam_ch_info *d = &port->data_ch;

	pr_debug("%s: suspend work started\n", __func__);

	usb_bam_register_wake_cb(d->dst_connection_idx, gbam_wake_cb, port);
	if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
		usb_bam_register_start_stop_cbs(gbam_start, gbam_stop, port);
		usb_bam_suspend(&d->ipa_params);
	}
}

static void gbam2bam_resume_work(struct work_struct *w)
{
	struct gbam_port *port = container_of(w, struct gbam_port, resume_w);
	struct bam_ch_info *d = &port->data_ch;

	pr_debug("%s: resume work started\n", __func__);

	usb_bam_register_wake_cb(d->dst_connection_idx, NULL, NULL);
	if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA)
		usb_bam_resume(&d->ipa_params);
}

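/*
 * Invoked when the A2 (modem) side resets its BAM: the endpoints are
 * temporarily disabled, the BAM hardware is disabled, reset and re-enabled,
 * and then the endpoints and the endless transfers are restored.
 */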
static int gbam_peer_reset_cb(void *param)
{
	struct gbam_port	*port = (struct gbam_port *)param;
	struct bam_ch_info	*d;
	struct f_rmnet		*dev;
	struct usb_gadget	*gadget;
	int ret;
	bool reenable_eps = false;

	dev = port_to_rmnet(port->gr);
	d = &port->data_ch;

	gadget = dev->cdev->gadget;

	pr_debug("%s: reset by peer\n", __func__);

	/* Disable the relevant EPs if currently EPs are enabled */
	if (port->port_usb && port->port_usb->in &&
			port->port_usb->in->driver_data) {
		usb_ep_disable(port->port_usb->out);
		usb_ep_disable(port->port_usb->in);

		port->port_usb->in->driver_data = NULL;
		port->port_usb->out->driver_data = NULL;
		reenable_eps = true;
	}

	/* Disable BAM */
	msm_hw_bam_disable(1);

	/* Reset BAM */
	ret = usb_bam_a2_reset(0);
	if (ret) {
		pr_err("%s: BAM reset failed %d\n", __func__, ret);
		goto reenable_eps;
	}

	/* Enable BAM */
	msm_hw_bam_disable(0);

reenable_eps:
	/* Re-Enable the relevant EPs, if EPs were originally enabled */
	if (reenable_eps) {
		ret = usb_ep_enable(port->port_usb->in);
		if (ret) {
			pr_err("%s: usb_ep_enable failed eptype:IN ep:%p",
				__func__, port->port_usb->in);
			return ret;
		}
		port->port_usb->in->driver_data = port;

		ret = usb_ep_enable(port->port_usb->out);
		if (ret) {
			pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p",
				__func__, port->port_usb->out);
			port->port_usb->in->driver_data = 0;
			return ret;
		}
		port->port_usb->out->driver_data = port;

		gbam_start_endless_rx(port);
		gbam_start_endless_tx(port);
	}

	/* Unregister the peer reset callback */
	if (d->trans == USB_GADGET_XPORT_BAM2BAM && port->port_num == 0)
		usb_bam_register_peer_reset_cb(NULL, NULL);

	return 0;
}

/* BAM data channel ready, allow attempt to open */
static int gbam_data_ch_probe(struct platform_device *pdev)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	int			i;
	unsigned long		flags;

	pr_debug("%s: name:%s\n", __func__, pdev->name);

	for (i = 0; i < n_bam_ports; i++) {
		port = bam_ports[i].port;
		d = &port->data_ch;

		if (!strncmp(bam_ch_names[i], pdev->name,
					BAM_DMUX_CH_NAME_MAX_LEN)) {
			set_bit(BAM_CH_READY, &d->flags);

			/* if usb is online, try opening bam_ch */
			spin_lock_irqsave(&port->port_lock_ul, flags);
			spin_lock(&port->port_lock_dl);
			if (port->port_usb)
				queue_work(gbam_wq, &port->connect_w);
			spin_unlock(&port->port_lock_dl);
			spin_unlock_irqrestore(&port->port_lock_ul, flags);

			break;
		}
	}

	return 0;
}

/* BAM data channel went inactive, so close it */
static int gbam_data_ch_remove(struct platform_device *pdev)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	struct usb_ep		*ep_in = NULL;
	struct usb_ep		*ep_out = NULL;
	unsigned long		flags;
	int			i;

	pr_debug("%s: name:%s\n", __func__, pdev->name);

	for (i = 0; i < n_bam_ports; i++) {
		if (!strncmp(bam_ch_names[i], pdev->name,
					BAM_DMUX_CH_NAME_MAX_LEN)) {
			port = bam_ports[i].port;
			d = &port->data_ch;

			spin_lock_irqsave(&port->port_lock_ul, flags);
			spin_lock(&port->port_lock_dl);
			if (port->port_usb) {
				ep_in = port->port_usb->in;
				ep_out = port->port_usb->out;
			}
			spin_unlock(&port->port_lock_dl);
			spin_unlock_irqrestore(&port->port_lock_ul, flags);

			if (ep_in)
				usb_ep_fifo_flush(ep_in);
			if (ep_out)
				usb_ep_fifo_flush(ep_out);

			gbam_free_buffers(port);

			msm_bam_dmux_close(d->id);

			/* bam dmux will free all pending skbs */
			d->pending_with_bam = 0;

			clear_bit(BAM_CH_READY, &d->flags);
			clear_bit(BAM_CH_OPENED, &d->flags);
		}
	}

	return 0;
}

static void gbam_port_free(int portno)
{
	struct gbam_port *port = bam_ports[portno].port;
	struct platform_driver *pdrv = &bam_ports[portno].pdrv;

	if (port) {
		kfree(port);
		platform_driver_unregister(pdrv);
	}
}

static void gbam2bam_port_free(int portno)
{
	struct gbam_port *port = bam2bam_ports[portno];

	kfree(port);
}

static int gbam_port_alloc(int portno)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	struct platform_driver	*pdrv;

	port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->port_num = portno;

	/* port initialization */
	spin_lock_init(&port->port_lock_ul);
	spin_lock_init(&port->port_lock_dl);
	INIT_WORK(&port->connect_w, gbam_connect_work);
	INIT_WORK(&port->disconnect_w, gbam_disconnect_work);

	/* data ch */
	d = &port->data_ch;
	d->port = port;
	INIT_LIST_HEAD(&d->tx_idle);
	INIT_LIST_HEAD(&d->rx_idle);
	INIT_WORK(&d->write_tobam_w, gbam_data_write_tobam);
	INIT_WORK(&d->write_tohost_w, gbam_write_data_tohost_w);
	skb_queue_head_init(&d->tx_skb_q);
	skb_queue_head_init(&d->rx_skb_q);
	d->id = bam_ch_ids[portno];

	bam_ports[portno].port = port;

	pdrv = &bam_ports[portno].pdrv;
	pdrv->probe = gbam_data_ch_probe;
	pdrv->remove = gbam_data_ch_remove;
	pdrv->driver.name = bam_ch_names[portno];
	pdrv->driver.owner = THIS_MODULE;

	platform_driver_register(pdrv);
	pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);

	return 0;
}

static int gbam2bam_port_alloc(int portno)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;

	port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->port_num = portno;

	/* port initialization */
	spin_lock_init(&port->port_lock_ul);
	spin_lock_init(&port->port_lock_dl);

	INIT_WORK(&port->connect_w, gbam2bam_connect_work);
	INIT_WORK(&port->disconnect_w, gbam2bam_disconnect_work);
	INIT_WORK(&port->suspend_w, gbam2bam_suspend_work);
	INIT_WORK(&port->resume_w, gbam2bam_resume_work);

	/* data ch */
	d = &port->data_ch;
	d->port = port;
	bam2bam_ports[portno] = port;

	pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);

	return 0;
}

#if defined(CONFIG_DEBUG_FS)
#define DEBUG_BUF_SIZE	1024
static ssize_t gbam_read_stats(struct file *file, char __user *ubuf,
		size_t count, loff_t *ppos)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	char			*buf;
	unsigned long		flags;
	int			ret;
	int			i;
	int			temp = 0;

	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < n_bam_ports; i++) {
		port = bam_ports[i].port;
		if (!port)
			continue;
		spin_lock_irqsave(&port->port_lock_ul, flags);
		spin_lock(&port->port_lock_dl);

		d = &port->data_ch;

		temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
				"#PORT:%d port:%p data_ch:%p#\n"
				"dpkts_to_usbhost: %lu\n"
				"dpkts_to_modem: %lu\n"
				"dpkts_pwith_bam: %u\n"
				"to_usbhost_dcnt: %u\n"
				"tomodem__dcnt: %u\n"
				"tx_buf_len: %u\n"
				"rx_buf_len: %u\n"
				"data_ch_open: %d\n"
				"data_ch_ready: %d\n",
				i, port, &port->data_ch,
				d->to_host, d->to_modem,
				d->pending_with_bam,
				d->tohost_drp_cnt, d->tomodem_drp_cnt,
				d->tx_skb_q.qlen, d->rx_skb_q.qlen,
				test_bit(BAM_CH_OPENED, &d->flags),
				test_bit(BAM_CH_READY, &d->flags));

		spin_unlock(&port->port_lock_dl);
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
	}

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);

	kfree(buf);

	return ret;
}

static ssize_t gbam_reset_stats(struct file *file, const char __user *buf,
		size_t count, loff_t *ppos)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	int			i;
	unsigned long		flags;

	for (i = 0; i < n_bam_ports; i++) {
		port = bam_ports[i].port;
		if (!port)
			continue;

		spin_lock_irqsave(&port->port_lock_ul, flags);
		spin_lock(&port->port_lock_dl);

		d = &port->data_ch;

		d->to_host = 0;
		d->to_modem = 0;
		d->pending_with_bam = 0;
		d->tohost_drp_cnt = 0;
		d->tomodem_drp_cnt = 0;

		spin_unlock(&port->port_lock_dl);
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
	}
	return count;
}

const struct file_operations gbam_stats_ops = {
	.read = gbam_read_stats,
	.write = gbam_reset_stats,
};

struct dentry *gbam_dent;
static void gbam_debugfs_init(void)
{
	struct dentry *dfile;

	gbam_dent = debugfs_create_dir("usb_rmnet", 0);
	if (!gbam_dent || IS_ERR(gbam_dent))
		return;

	dfile = debugfs_create_file("status", 0444, gbam_dent, 0,
			&gbam_stats_ops);
	if (!dfile || IS_ERR(dfile)) {
		debugfs_remove(gbam_dent);
		gbam_dent = NULL;
		return;
	}
}
static void gbam_debugfs_remove(void)
{
	debugfs_remove_recursive(gbam_dent);
}
#else
static inline void gbam_debugfs_init(void) {}
static inline void gbam_debugfs_remove(void) {}
#endif

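/*
 * gbam_connect()/gbam_disconnect() below are the entry points used by the
 * rmnet function driver (see u_rmnet.h): connect enables the endpoints and
 * schedules the transport-specific connect work, disconnect tears the
 * endpoints down and schedules the matching disconnect work.
 */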
void gbam_disconnect(struct grmnet *gr, u8 port_num, enum transport_type trans)
{
	struct gbam_port	*port;
	unsigned long		flags;
	struct bam_ch_info	*d;

	pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);

	if (trans == USB_GADGET_XPORT_BAM &&
		port_num >= n_bam_ports) {
		pr_err("%s: invalid bam portno#%d\n",
			__func__, port_num);
		return;
	}

	if ((trans == USB_GADGET_XPORT_BAM2BAM ||
		trans == USB_GADGET_XPORT_BAM2BAM_IPA) &&
		port_num >= n_bam2bam_ports) {
		pr_err("%s: invalid bam2bam portno#%d\n",
			__func__, port_num);
		return;
	}

	if (!gr) {
		pr_err("%s: grmnet port is null\n", __func__);
		return;
	}
	if (trans == USB_GADGET_XPORT_BAM)
		port = bam_ports[port_num].port;
	else
		port = bam2bam_ports[port_num];

	d = &port->data_ch;
	port->gr = gr;

	if (trans == USB_GADGET_XPORT_BAM)
		gbam_free_buffers(port);

	spin_lock_irqsave(&port->port_lock_ul, flags);
	spin_lock(&port->port_lock_dl);
	port->port_usb = 0;
	n_tx_req_queued = 0;
	spin_unlock(&port->port_lock_dl);
	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	/* disable endpoints */
	usb_ep_disable(gr->out);
	usb_ep_disable(gr->in);

	gr->in->driver_data = NULL;
	gr->out->driver_data = NULL;

	if (trans == USB_GADGET_XPORT_BAM ||
		trans == USB_GADGET_XPORT_BAM2BAM_IPA)
		queue_work(gbam_wq, &port->disconnect_w);
	else if (trans == USB_GADGET_XPORT_BAM2BAM) {
		if (port_num == 0) {
			if (usb_bam_client_ready(false)) {
				pr_err("%s: usb_bam_client_ready failed\n",
					__func__);
			}
		}
	}
}

int gbam_connect(struct grmnet *gr, u8 port_num,
		enum transport_type trans, u8 src_connection_idx,
		u8 dst_connection_idx)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	int			ret;
	unsigned long		flags;

	pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);

	if (trans == USB_GADGET_XPORT_BAM && port_num >= n_bam_ports) {
		pr_err("%s: invalid portno#%d\n", __func__, port_num);
		return -ENODEV;
	}

	if ((trans == USB_GADGET_XPORT_BAM2BAM ||
		trans == USB_GADGET_XPORT_BAM2BAM_IPA)
		&& port_num >= n_bam2bam_ports) {
		pr_err("%s: invalid portno#%d\n", __func__, port_num);
		return -ENODEV;
	}

	if (!gr) {
		pr_err("%s: grmnet port is null\n", __func__);
		return -ENODEV;
	}

	if (trans == USB_GADGET_XPORT_BAM)
		port = bam_ports[port_num].port;
	else
		port = bam2bam_ports[port_num];

	d = &port->data_ch;

	ret = usb_ep_enable(gr->in);
	if (ret) {
		pr_err("%s: usb_ep_enable failed eptype:IN ep:%p",
			__func__, gr->in);
		return ret;
	}
	gr->in->driver_data = port;

	ret = usb_ep_enable(gr->out);
	if (ret) {
		pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p",
			__func__, gr->out);
		gr->in->driver_data = 0;
		return ret;
	}
	gr->out->driver_data = port;

	spin_lock_irqsave(&port->port_lock_ul, flags);
	spin_lock(&port->port_lock_dl);
	port->port_usb = gr;

	if (trans == USB_GADGET_XPORT_BAM) {
		d->to_host = 0;
		d->to_modem = 0;
		d->pending_with_bam = 0;
		d->tohost_drp_cnt = 0;
		d->tomodem_drp_cnt = 0;
	}

	spin_unlock(&port->port_lock_dl);
	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	if (trans == USB_GADGET_XPORT_BAM2BAM) {
		port->gr = gr;
		d->src_connection_idx = src_connection_idx;
		d->dst_connection_idx = dst_connection_idx;
	} else if (trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
		port->gr = gr;
		d->ipa_params.src_pipe = &(d->src_pipe_idx);
		d->ipa_params.dst_pipe = &(d->dst_pipe_idx);
		d->ipa_params.src_idx = src_connection_idx;
		d->ipa_params.dst_idx = dst_connection_idx;
	}

	d->trans = trans;
	queue_work(gbam_wq, &port->connect_w);
	return 0;
}

int gbam_setup(unsigned int no_bam_port, unsigned int no_bam2bam_port)
{
	int	i;
	int	ret;

	pr_debug("%s: requested BAM ports:%d and BAM2BAM ports:%d\n",
			__func__, no_bam_port, no_bam2bam_port);

	if ((!no_bam_port && !no_bam2bam_port) || no_bam_port > BAM_N_PORTS
		|| no_bam2bam_port > BAM2BAM_N_PORTS) {
		pr_err("%s: Invalid num of ports count:%d,%d\n",
			__func__, no_bam_port, no_bam2bam_port);
		return -EINVAL;
	}

	gbam_wq = alloc_workqueue("k_gbam", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!gbam_wq) {
		pr_err("%s: Unable to create workqueue gbam_wq\n",
				__func__);
		return -ENOMEM;
	}

	for (i = 0; i < no_bam_port; i++) {
		n_bam_ports++;
		ret = gbam_port_alloc(i);
		if (ret) {
			n_bam_ports--;
			pr_err("%s: Unable to alloc port:%d\n", __func__, i);
			goto free_bam_ports;
		}
	}

	for (i = 0; i < no_bam2bam_port; i++) {
		n_bam2bam_ports++;
		ret = gbam2bam_port_alloc(i);
		if (ret) {
			n_bam2bam_ports--;
			pr_err("%s: Unable to alloc port:%d\n", __func__, i);
			goto free_bam_ports;
		}
	}

	gbam_debugfs_init();
	return 0;

free_bam_ports:
	for (i = 0; i < n_bam_ports; i++)
		gbam_port_free(i);
	for (i = 0; i < n_bam2bam_ports; i++)
		gbam2bam_port_free(i);
	destroy_workqueue(gbam_wq);

	return ret;
}

void gbam_cleanup(void)
{
	gbam_debugfs_remove();
}

void gbam_suspend(struct grmnet *gr, u8 port_num, enum transport_type trans)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;

	if (trans != USB_GADGET_XPORT_BAM2BAM &&
		trans != USB_GADGET_XPORT_BAM2BAM_IPA)
		return;

	port = bam2bam_ports[port_num];
	d = &port->data_ch;

	pr_debug("%s: suspended port %d\n", __func__, port_num);

	queue_work(gbam_wq, &port->suspend_w);
}

void gbam_resume(struct grmnet *gr, u8 port_num, enum transport_type trans)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;

	if (trans != USB_GADGET_XPORT_BAM2BAM &&
		trans != USB_GADGET_XPORT_BAM2BAM_IPA)
		return;

	port = bam2bam_ports[port_num];
	d = &port->data_ch;

	pr_debug("%s: resumed port %d\n", __func__, port_num);

	queue_work(gbam_wq, &port->resume_w);
}
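
/*
 * Usage sketch (not part of this driver): a gadget function such as f_rmnet
 * is expected to call gbam_setup() once at bind time and then drive a port
 * from its set_alt()/disable() paths, roughly:
 *
 *	gbam_setup(1, 1);
 *	...
 *	gbam_connect(&dev->port, port_num, xport_type, src_idx, dst_idx);
 *	...
 *	gbam_disconnect(&dev->port, port_num, xport_type);
 *
 * Names such as dev->port and the index values are illustrative only; the
 * real call sites live in the rmnet function driver.
 */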