blob: aa93a7ddacf8dfc7002d09693e7dd93b596579ae [file] [log] [blame]
Anna Perel97b8c222012-01-18 10:08:14 +02001/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/kernel.h>
14#include <linux/interrupt.h>
15#include <linux/device.h>
16#include <linux/delay.h>
17#include <linux/slab.h>
18#include <linux/termios.h>
19#include <mach/msm_smd.h>
20#include <linux/netdevice.h>
21#include <mach/bam_dmux.h>
22#include <linux/debugfs.h>
23#include <linux/bitops.h>
24#include <linux/termios.h>
25
Ofir Cohena1c2a872011-12-14 10:26:34 +020026#include <mach/usb_gadget_xport.h>
Shimrit Malichi194fe122012-07-25 13:50:41 +030027#include <linux/usb/msm_hsusb.h>
Ofir Cohena1c2a872011-12-14 10:26:34 +020028#include <mach/usb_bam.h>
29
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070030#include "u_rmnet.h"
31
#define BAM_N_PORTS	1
#define BAM2BAM_N_PORTS	3

static struct workqueue_struct *gbam_wq;
static int n_bam_ports;
static int n_bam2bam_ports;
/* IN requests queued since the last one that asked for a completion
 * interrupt; paired with dl_intr_threshold below to rate-limit DL irqs */
static unsigned n_tx_req_queued;
/* BAM DMUX channel id backing each bam_ports[] slot */
static unsigned bam_ch_ids[] = { 8 };

/* platform-device names matched in gbam_data_ch_probe() */
static const char *bam_ch_names[] = { "bam_dmux_ch_8" };

/* max UL skbs outstanding with the DMUX at any time */
#define BAM_PENDING_LIMIT		220
/* drop DL packets once this many skbs are waiting for the host */
#define BAM_MUX_TX_PKT_DROP_THRESHOLD	1000
/* UL flow control: stop OUT transfers at EN threshold, resume below DIS */
#define BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD	500
#define BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD	300
#define BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT	1

/* skb headroom reserved for the DMUX header */
#define BAM_MUX_HDR			8

#define BAM_MUX_RX_Q_SIZE		16
#define BAM_MUX_TX_Q_SIZE		200
#define BAM_MUX_RX_REQ_SIZE		2048	/* Must be 1KB aligned */

/* raise an IN completion interrupt only every Nth queued request */
#define DL_INTR_THRESHOLD		20

/* all thresholds above are runtime-tunable through module parameters */
unsigned int bam_mux_tx_pkt_drop_thld = BAM_MUX_TX_PKT_DROP_THRESHOLD;
module_param(bam_mux_tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_fctrl_en_thld = BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD;
module_param(bam_mux_rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_fctrl_support = BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT;
module_param(bam_mux_rx_fctrl_support, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_fctrl_dis_thld = BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD;
module_param(bam_mux_rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_tx_q_size = BAM_MUX_TX_Q_SIZE;
module_param(bam_mux_tx_q_size, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_q_size = BAM_MUX_RX_Q_SIZE;
module_param(bam_mux_rx_q_size, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_req_size = BAM_MUX_RX_REQ_SIZE;
module_param(bam_mux_rx_req_size, uint, S_IRUGO | S_IWUSR);

unsigned int dl_intr_threshold = DL_INTR_THRESHOLD;
module_param(dl_intr_threshold, uint, S_IRUGO | S_IWUSR);
/* bits in bam_ch_info.flags */
#define BAM_CH_OPENED	BIT(0)	/* DMUX channel opened from our side */
#define BAM_CH_READY	BIT(1)	/* DMUX platform device probed */

/*
 * Per-channel state: request pools and skb queues for both directions,
 * BAM/IPA pipe identifiers for the BAM2BAM transports, and counters.
 */
struct bam_ch_info {
	unsigned long		flags;	/* BAM_CH_OPENED / BAM_CH_READY */
	unsigned		id;	/* BAM DMUX channel id */

	/* DL (modem -> host): idle IN requests and pending skbs */
	struct list_head	tx_idle;
	struct sk_buff_head	tx_skb_q;

	/* UL (host -> modem): idle OUT requests and pending skbs */
	struct list_head	rx_idle;
	struct sk_buff_head	rx_skb_q;

	struct gbam_port	*port;		/* owning port */
	struct work_struct	write_tobam_w;	/* UL drain worker */
	struct work_struct	write_tohost_w;	/* DL drain worker */

	/* "endless" zero-length requests used by BAM2BAM transports */
	struct usb_request	*rx_req;
	struct usb_request	*tx_req;

	u32			src_pipe_idx;
	u32			dst_pipe_idx;
	u8			connection_idx;
	enum transport_type	trans;
	struct usb_bam_connect_ipa_params	ipa_params;

	/* stats */
	unsigned int		pending_with_bam; /* UL writes in flight */
	unsigned int		tohost_drp_cnt;
	unsigned int		tomodem_drp_cnt;
	unsigned int		tx_len;
	unsigned int		rx_len;
	unsigned long		to_modem;
	unsigned long		to_host;
};
116
/*
 * One rmnet data port.  The UL and DL paths are guarded by separate
 * spinlocks; code that needs both takes port_lock_ul before
 * port_lock_dl (see gbam_free_buffers/gbam_connect_work).
 */
struct gbam_port {
	unsigned		port_num;
	spinlock_t		port_lock_ul;	/* guards rx_* (UL) state */
	spinlock_t		port_lock_dl;	/* guards tx_* (DL) state */

	struct grmnet		*port_usb;	/* non-NULL while bound */
	struct grmnet		*gr;

	struct bam_ch_info	data_ch;

	struct work_struct	connect_w;
	struct work_struct	disconnect_w;
};
130
/* one entry per BAM-DMUX port: the port plus its platform driver */
static struct bam_portmaster {
	struct gbam_port *port;
	struct platform_driver pdrv;
} bam_ports[BAM_N_PORTS];

struct gbam_port *bam2bam_ports[BAM2BAM_N_PORTS];
static void gbam_start_rx(struct gbam_port *port);
static void gbam_start_endless_rx(struct gbam_port *port);
static void gbam_start_endless_tx(struct gbam_port *port);
141/*---------------misc functions---------------- */
142static void gbam_free_requests(struct usb_ep *ep, struct list_head *head)
143{
144 struct usb_request *req;
145
146 while (!list_empty(head)) {
147 req = list_entry(head->next, struct usb_request, list);
148 list_del(&req->list);
149 usb_ep_free_request(ep, req);
150 }
151}
152
153static int gbam_alloc_requests(struct usb_ep *ep, struct list_head *head,
154 int num,
155 void (*cb)(struct usb_ep *ep, struct usb_request *),
156 gfp_t flags)
157{
158 int i;
159 struct usb_request *req;
160
161 pr_debug("%s: ep:%p head:%p num:%d cb:%p", __func__,
162 ep, head, num, cb);
163
164 for (i = 0; i < num; i++) {
165 req = usb_ep_alloc_request(ep, flags);
166 if (!req) {
167 pr_debug("%s: req allocated:%d\n", __func__, i);
168 return list_empty(head) ? -ENOMEM : 0;
169 }
170 req->complete = cb;
171 list_add(&req->list, head);
172 }
173
174 return 0;
175}
176/*--------------------------------------------- */
177
178/*------------data_path----------------------------*/
/*
 * Drain the DL (modem -> host) skb queue: pair each queued skb with an
 * idle IN request and hand it to the UDC.  Safe from any context; takes
 * port_lock_dl internally and drops it around usb_ep_queue().
 */
static void gbam_write_data_tohost(struct gbam_port *port)
{
	unsigned long flags;
	struct bam_ch_info *d = &port->data_ch;
	struct sk_buff *skb;
	int ret;
	struct usb_request *req;
	struct usb_ep *ep;

	spin_lock_irqsave(&port->port_lock_dl, flags);
	if (!port->port_usb) {
		/* function unbound - nowhere to write */
		spin_unlock_irqrestore(&port->port_lock_dl, flags);
		return;
	}

	ep = port->port_usb->in;

	while (!list_empty(&d->tx_idle)) {
		skb = __skb_dequeue(&d->tx_skb_q);
		if (!skb) {
			spin_unlock_irqrestore(&port->port_lock_dl, flags);
			return;
		}
		req = list_first_entry(&d->tx_idle,
				struct usb_request,
				list);
		req->context = skb;
		req->buf = skb->data;
		req->length = skb->len;
		/* request a completion interrupt only once every
		 * dl_intr_threshold transfers to cut interrupt load */
		n_tx_req_queued++;
		if (n_tx_req_queued == dl_intr_threshold) {
			req->no_interrupt = 0;
			n_tx_req_queued = 0;
		} else {
			req->no_interrupt = 1;
		}

		list_del(&req->list);

		/* drop only the lock (not irq state) across the queue call */
		spin_unlock(&port->port_lock_dl);
		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
		spin_lock(&port->port_lock_dl);
		if (ret) {
			pr_err("%s: usb epIn failed\n", __func__);
			/* return the request; the skb is lost */
			list_add(&req->list, &d->tx_idle);
			dev_kfree_skb_any(skb);
			break;
		}
		d->to_host++;
	}
	spin_unlock_irqrestore(&port->port_lock_dl, flags);
}
231
Chiranjeevi Velempatie5105922012-01-19 12:25:26 +0530232static void gbam_write_data_tohost_w(struct work_struct *w)
233{
234 struct bam_ch_info *d;
235 struct gbam_port *port;
236
237 d = container_of(w, struct bam_ch_info, write_tohost_w);
238 port = d->port;
239
240 gbam_write_data_tohost(port);
241}
242
/*
 * BAM DMUX receive callback (DL path): queue @skb toward the host and
 * kick the DL writer.  The packet is dropped when the USB side is gone
 * or the DL backlog exceeds bam_mux_tx_pkt_drop_thld.
 */
void gbam_data_recv_cb(void *p, struct sk_buff *skb)
{
	struct gbam_port *port = p;
	struct bam_ch_info *d = &port->data_ch;
	unsigned long flags;

	if (!skb)
		return;

	pr_debug("%s: p:%p#%d d:%p skb_len:%d\n", __func__,
			port, port->port_num, d, skb->len);

	spin_lock_irqsave(&port->port_lock_dl, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock_dl, flags);
		dev_kfree_skb_any(skb);
		return;
	}

	if (d->tx_skb_q.qlen > bam_mux_tx_pkt_drop_thld) {
		/* host is not draining fast enough - drop and count */
		d->tohost_drp_cnt++;
		if (printk_ratelimit())
			pr_err("%s: tx pkt dropped: tx_drop_cnt:%u\n",
					__func__, d->tohost_drp_cnt);
		spin_unlock_irqrestore(&port->port_lock_dl, flags);
		dev_kfree_skb_any(skb);
		return;
	}

	__skb_queue_tail(&d->tx_skb_q, skb);
	spin_unlock_irqrestore(&port->port_lock_dl, flags);

	gbam_write_data_tohost(port);
}
277
/*
 * BAM DMUX write-done callback (UL path): the skb handed to
 * msm_bam_dmux_write() has been consumed.  Free it, decrement the
 * in-flight count and reschedule the UL worker to push more data.
 */
void gbam_data_write_done(void *p, struct sk_buff *skb)
{
	struct gbam_port *port = p;
	struct bam_ch_info *d = &port->data_ch;
	unsigned long flags;

	if (!skb)
		return;

	dev_kfree_skb_any(skb);

	spin_lock_irqsave(&port->port_lock_ul, flags);

	d->pending_with_bam--;

	pr_debug("%s: port:%p d:%p tom:%lu pbam:%u, pno:%d\n", __func__,
			port, d, d->to_modem,
			d->pending_with_bam, port->port_num);

	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	queue_work(gbam_wq, &d->write_tobam_w);
}
301
/*
 * UL worker: move skbs received from the host (rx_skb_q) into the BAM
 * DMUX channel, keeping at most BAM_PENDING_LIMIT writes in flight.
 * Once the backlog falls below the flow-control disable threshold the
 * OUT endpoint is primed again via gbam_start_rx().
 */
static void gbam_data_write_tobam(struct work_struct *w)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	struct sk_buff *skb;
	unsigned long flags;
	int ret;
	int qlen;

	d = container_of(w, struct bam_ch_info, write_tobam_w);
	port = d->port;

	spin_lock_irqsave(&port->port_lock_ul, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}

	while (d->pending_with_bam < BAM_PENDING_LIMIT) {
		skb = __skb_dequeue(&d->rx_skb_q);
		if (!skb)
			break;

		d->pending_with_bam++;
		d->to_modem++;

		pr_debug("%s: port:%p d:%p tom:%lu pbam:%u pno:%d\n", __func__,
				port, d, d->to_modem, d->pending_with_bam,
				port->port_num);

		/* the DMUX write is done unlocked; counters were bumped
		 * first so a concurrent write-done sees consistent state */
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		ret = msm_bam_dmux_write(d->id, skb);
		spin_lock_irqsave(&port->port_lock_ul, flags);
		if (ret) {
			pr_debug("%s: write error:%d\n", __func__, ret);
			d->pending_with_bam--;
			d->to_modem--;
			d->tomodem_drp_cnt++;
			dev_kfree_skb_any(skb);
			break;
		}
	}

	qlen = d->rx_skb_q.qlen;

	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	if (qlen < BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD)
		gbam_start_rx(port);
}
352/*-------------------------------------------------------------*/
353
/*
 * IN (DL) request completion: free the skb just sent to the host, park
 * the request back on tx_idle and schedule the DL worker for more.
 */
static void gbam_epin_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gbam_port *port = ep->driver_data;
	struct bam_ch_info *d;
	struct sk_buff *skb = req->context;
	int status = req->status;

	switch (status) {
	case 0:
		/* successful completion */
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		break;
	default:
		pr_err("%s: data tx ep error %d\n",
				__func__, status);
		break;
	}

	dev_kfree_skb_any(skb);

	/* NOTE(review): when driver_data is already cleared the request is
	 * not recycled here - presumably freed on disconnect; confirm */
	if (!port)
		return;

	spin_lock(&port->port_lock_dl);
	d = &port->data_ch;
	list_add_tail(&req->list, &d->tx_idle);
	spin_unlock(&port->port_lock_dl);

	queue_work(gbam_wq, &d->write_tohost_w);
}
386
/*
 * OUT (UL) request completion: hand the received skb to the UL worker,
 * then either re-arm the request with a fresh skb or park it on rx_idle
 * when flow control is engaged or allocation fails.
 */
static void
gbam_epout_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gbam_port *port = ep->driver_data;
	struct bam_ch_info *d = &port->data_ch;
	struct sk_buff *skb = req->context;
	int status = req->status;
	int queue = 0;

	switch (status) {
	case 0:
		skb_put(skb, req->actual);
		queue = 1;
		break;
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* cable disconnection */
		dev_kfree_skb_any(skb);
		req->buf = 0;
		usb_ep_free_request(ep, req);
		return;
	default:
		if (printk_ratelimit())
			pr_err("%s: %s response error %d, %d/%d\n",
				__func__, ep->name, status,
				req->actual, req->length);
		dev_kfree_skb_any(skb);
		break;
	}

	spin_lock(&port->port_lock_ul);
	if (queue) {
		__skb_queue_tail(&d->rx_skb_q, skb);
		queue_work(gbam_wq, &d->write_tobam_w);
	}

	/* TODO: Handle flow control gracefully by having
	 * having call back mechanism from bam driver
	 */
	if (bam_mux_rx_fctrl_support &&
			d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld) {
		/* UL backlog too deep: stop re-queuing; gbam_start_rx()
		 * re-arms once the worker drains below the DIS threshold */
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock_ul);
		return;
	}
	spin_unlock(&port->port_lock_ul);

	skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
	if (!skb) {
		spin_lock(&port->port_lock_ul);
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock_ul);
		return;
	}
	skb_reserve(skb, BAM_MUX_HDR);

	req->buf = skb->data;
	req->length = bam_mux_rx_req_size;
	req->context = skb;

	status = usb_ep_queue(ep, req, GFP_ATOMIC);
	if (status) {
		dev_kfree_skb_any(skb);

		if (printk_ratelimit())
			pr_err("%s: data rx enqueue err %d\n",
					__func__, status);

		spin_lock(&port->port_lock_ul);
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock_ul);
	}
}
461
Ofir Cohena1c2a872011-12-14 10:26:34 +0200462static void gbam_endless_rx_complete(struct usb_ep *ep, struct usb_request *req)
463{
464 int status = req->status;
465
466 pr_debug("%s status: %d\n", __func__, status);
467}
468
469static void gbam_endless_tx_complete(struct usb_ep *ep, struct usb_request *req)
470{
471 int status = req->status;
472
473 pr_debug("%s status: %d\n", __func__, status);
474}
475
/*
 * Prime the OUT endpoint: attach a fresh skb to every idle OUT request
 * and queue it, stopping early when UL flow control engages or skb
 * allocation fails.
 */
static void gbam_start_rx(struct gbam_port *port)
{
	struct usb_request *req;
	struct bam_ch_info *d;
	struct usb_ep *ep;
	unsigned long flags;
	int ret;
	struct sk_buff *skb;

	spin_lock_irqsave(&port->port_lock_ul, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}

	d = &port->data_ch;
	ep = port->port_usb->out;

	while (port->port_usb && !list_empty(&d->rx_idle)) {

		/* back off while the UL backlog is above the enable
		 * threshold; gbam_data_write_tobam() restarts us */
		if (bam_mux_rx_fctrl_support &&
			d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld)
			break;

		req = list_first_entry(&d->rx_idle, struct usb_request, list);

		skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
		if (!skb)
			break;
		skb_reserve(skb, BAM_MUX_HDR);

		list_del(&req->list);
		req->buf = skb->data;
		req->length = bam_mux_rx_req_size;
		req->context = skb;

		/* drop the lock around usb_ep_queue(); port_usb is
		 * re-checked after it is re-taken */
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
		spin_lock_irqsave(&port->port_lock_ul, flags);
		if (ret) {
			dev_kfree_skb_any(skb);

			if (printk_ratelimit())
				pr_err("%s: rx queue failed\n", __func__);

			/* port may have disconnected while unlocked */
			if (port->port_usb)
				list_add(&req->list, &d->rx_idle);
			else
				usb_ep_free_request(ep, req);
			break;
		}
	}
	spin_unlock_irqrestore(&port->port_lock_ul, flags);
}
530
Ofir Cohena1c2a872011-12-14 10:26:34 +0200531static void gbam_start_endless_rx(struct gbam_port *port)
532{
533 struct bam_ch_info *d = &port->data_ch;
534 int status;
535
536 status = usb_ep_queue(port->port_usb->out, d->rx_req, GFP_ATOMIC);
537 if (status)
538 pr_err("%s: error enqueuing transfer, %d\n", __func__, status);
539}
540
541static void gbam_start_endless_tx(struct gbam_port *port)
542{
543 struct bam_ch_info *d = &port->data_ch;
544 int status;
545
546 status = usb_ep_queue(port->port_usb->in, d->tx_req, GFP_ATOMIC);
547 if (status)
548 pr_err("%s: error enqueuing transfer, %d\n", __func__, status);
549}
550
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700551static void gbam_start_io(struct gbam_port *port)
552{
553 unsigned long flags;
554 struct usb_ep *ep;
555 int ret;
556 struct bam_ch_info *d;
557
558 pr_debug("%s: port:%p\n", __func__, port);
559
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530560 spin_lock_irqsave(&port->port_lock_ul, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700561 if (!port->port_usb) {
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530562 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700563 return;
564 }
565
566 d = &port->data_ch;
567 ep = port->port_usb->out;
Vamsi Krishna8f24f252011-11-02 11:46:08 -0700568 ret = gbam_alloc_requests(ep, &d->rx_idle, bam_mux_rx_q_size,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700569 gbam_epout_complete, GFP_ATOMIC);
570 if (ret) {
571 pr_err("%s: rx req allocation failed\n", __func__);
572 return;
573 }
574
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530575 spin_unlock_irqrestore(&port->port_lock_ul, flags);
576 spin_lock_irqsave(&port->port_lock_dl, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700577 ep = port->port_usb->in;
Vamsi Krishna8f24f252011-11-02 11:46:08 -0700578 ret = gbam_alloc_requests(ep, &d->tx_idle, bam_mux_tx_q_size,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700579 gbam_epin_complete, GFP_ATOMIC);
580 if (ret) {
581 pr_err("%s: tx req allocation failed\n", __func__);
582 gbam_free_requests(ep, &d->rx_idle);
583 return;
584 }
585
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530586 spin_unlock_irqrestore(&port->port_lock_dl, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700587
588 /* queue out requests */
589 gbam_start_rx(port);
590}
591
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600592static void gbam_notify(void *p, int event, unsigned long data)
593{
594 switch (event) {
595 case BAM_DMUX_RECEIVE:
596 gbam_data_recv_cb(p, (struct sk_buff *)(data));
597 break;
598 case BAM_DMUX_WRITE_DONE:
599 gbam_data_write_done(p, (struct sk_buff *)(data));
600 break;
601 }
602}
603
Ofir Cohena1c2a872011-12-14 10:26:34 +0200604static void gbam_free_buffers(struct gbam_port *port)
605{
606 struct sk_buff *skb;
607 unsigned long flags;
608 struct bam_ch_info *d;
609
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530610 spin_lock_irqsave(&port->port_lock_ul, flags);
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -0800611 spin_lock(&port->port_lock_dl);
Ofir Cohena1c2a872011-12-14 10:26:34 +0200612
613 if (!port || !port->port_usb)
614 goto free_buf_out;
615
616 d = &port->data_ch;
617
618 gbam_free_requests(port->port_usb->in, &d->tx_idle);
619 gbam_free_requests(port->port_usb->out, &d->rx_idle);
620
621 while ((skb = __skb_dequeue(&d->tx_skb_q)))
622 dev_kfree_skb_any(skb);
623
624 while ((skb = __skb_dequeue(&d->rx_skb_q)))
625 dev_kfree_skb_any(skb);
626
627free_buf_out:
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -0800628 spin_unlock(&port->port_lock_dl);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530629 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Ofir Cohena1c2a872011-12-14 10:26:34 +0200630}
631
Vamsi Krishna1ad076d2011-11-10 15:03:30 -0800632static void gbam_disconnect_work(struct work_struct *w)
633{
634 struct gbam_port *port =
635 container_of(w, struct gbam_port, disconnect_w);
636 struct bam_ch_info *d = &port->data_ch;
637
638 if (!test_bit(BAM_CH_OPENED, &d->flags))
639 return;
640
641 msm_bam_dmux_close(d->id);
642 clear_bit(BAM_CH_OPENED, &d->flags);
643}
644
Ofir Cohenfdecb602012-11-16 15:50:01 +0200645static void gbam2bam_disconnect_work(struct work_struct *w)
646{
647 struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
648 struct bam_ch_info *d = &port->data_ch;
649 int ret;
650
651 if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
Ofir Cohen77848d62012-12-05 13:16:10 +0200652 ret = usb_bam_disconnect_ipa(d->connection_idx, &d->ipa_params);
Ofir Cohenfdecb602012-11-16 15:50:01 +0200653 if (ret)
654 pr_err("%s: usb_bam_disconnect_ipa failed: err:%d\n",
655 __func__, ret);
656 rmnet_bridge_disconnect();
657 }
658}
659
/*
 * Connect worker for the BAM-DMUX transport: once both the USB function
 * (port_usb) and the DMUX channel (BAM_CH_READY) are up, open the
 * channel, allocate the request pools and start I/O.
 */
static void gbam_connect_work(struct work_struct *w)
{
	struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
	struct bam_ch_info *d = &port->data_ch;
	int ret;
	unsigned long flags;

	/* take both locks (UL then DL) to get a consistent port_usb view */
	spin_lock_irqsave(&port->port_lock_ul, flags);
	spin_lock(&port->port_lock_dl);
	if (!port->port_usb) {
		spin_unlock(&port->port_lock_dl);
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}
	spin_unlock(&port->port_lock_dl);
	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	/* channel not probed yet; gbam_data_ch_probe() requeues us */
	if (!test_bit(BAM_CH_READY, &d->flags))
		return;

	ret = msm_bam_dmux_open(d->id, port, gbam_notify);
	if (ret) {
		pr_err("%s: unable open bam ch:%d err:%d\n",
				__func__, d->id, ret);
		return;
	}
	set_bit(BAM_CH_OPENED, &d->flags);

	gbam_start_io(port);

	pr_debug("%s: done\n", __func__);
}
692
Ofir Cohena1c2a872011-12-14 10:26:34 +0200693static void gbam2bam_connect_work(struct work_struct *w)
Jack Phameffd4ae2011-08-03 16:49:36 -0700694{
Ofir Cohena1c2a872011-12-14 10:26:34 +0200695 struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
696 struct bam_ch_info *d = &port->data_ch;
697 u32 sps_params;
698 int ret;
Jack Phameffd4ae2011-08-03 16:49:36 -0700699
Ofir Cohenfdecb602012-11-16 15:50:01 +0200700 if (d->trans == USB_GADGET_XPORT_BAM2BAM) {
701 ret = usb_bam_connect(d->connection_idx, &d->src_pipe_idx,
702 &d->dst_pipe_idx);
703 if (ret) {
704 pr_err("%s: usb_bam_connect failed: err:%d\n",
705 __func__, ret);
706 return;
707 }
708 } else if (d->trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
Ofir Cohen77848d62012-12-05 13:16:10 +0200709 d->ipa_params.client = IPA_CLIENT_USB_CONS;
710 d->ipa_params.dir = PEER_PERIPHERAL_TO_USB;
711 ret = usb_bam_connect_ipa(&d->ipa_params);
Ofir Cohenfdecb602012-11-16 15:50:01 +0200712 if (ret) {
713 pr_err("%s: usb_bam_connect_ipa failed: err:%d\n",
714 __func__, ret);
715 return;
716 }
717
Ofir Cohen77848d62012-12-05 13:16:10 +0200718 d->ipa_params.client = IPA_CLIENT_USB_PROD;
719 d->ipa_params.dir = USB_TO_PEER_PERIPHERAL;
Ofir Cohenfdecb602012-11-16 15:50:01 +0200720 /* Currently only DMA mode is supported */
Ofir Cohen77848d62012-12-05 13:16:10 +0200721 d->ipa_params.ipa_ep_cfg.mode.mode = IPA_DMA;
722 d->ipa_params.ipa_ep_cfg.mode.dst =
Ofir Cohenfdecb602012-11-16 15:50:01 +0200723 IPA_CLIENT_A2_TETHERED_CONS;
Ofir Cohen77848d62012-12-05 13:16:10 +0200724 ret = usb_bam_connect_ipa(&d->ipa_params);
Ofir Cohenfdecb602012-11-16 15:50:01 +0200725 if (ret) {
726 pr_err("%s: usb_bam_connect_ipa failed: err:%d\n",
727 __func__, ret);
728 return;
729 }
Ofir Cohen77848d62012-12-05 13:16:10 +0200730 rmnet_bridge_connect(d->ipa_params.prod_clnt_hdl,
731 d->ipa_params.cons_clnt_hdl, 0);
Ofir Cohena1c2a872011-12-14 10:26:34 +0200732 }
Jack Phameffd4ae2011-08-03 16:49:36 -0700733
Ofir Cohena1c2a872011-12-14 10:26:34 +0200734 d->rx_req = usb_ep_alloc_request(port->port_usb->out, GFP_KERNEL);
735 if (!d->rx_req)
736 return;
Jack Phameffd4ae2011-08-03 16:49:36 -0700737
Ofir Cohena1c2a872011-12-14 10:26:34 +0200738 d->rx_req->context = port;
739 d->rx_req->complete = gbam_endless_rx_complete;
740 d->rx_req->length = 0;
Ido Shayevitzd1cb16c2012-03-28 18:57:47 +0200741 sps_params = (MSM_SPS_MODE | d->src_pipe_idx |
742 MSM_VENDOR_ID) & ~MSM_IS_FINITE_TRANSFER;
Ofir Cohena1c2a872011-12-14 10:26:34 +0200743 d->rx_req->udc_priv = sps_params;
744 d->tx_req = usb_ep_alloc_request(port->port_usb->in, GFP_KERNEL);
745 if (!d->tx_req)
746 return;
Jack Phameffd4ae2011-08-03 16:49:36 -0700747
Ofir Cohena1c2a872011-12-14 10:26:34 +0200748 d->tx_req->context = port;
749 d->tx_req->complete = gbam_endless_tx_complete;
750 d->tx_req->length = 0;
Ido Shayevitzd1cb16c2012-03-28 18:57:47 +0200751 sps_params = (MSM_SPS_MODE | d->dst_pipe_idx |
752 MSM_VENDOR_ID) & ~MSM_IS_FINITE_TRANSFER;
Ofir Cohena1c2a872011-12-14 10:26:34 +0200753 d->tx_req->udc_priv = sps_params;
Jack Phameffd4ae2011-08-03 16:49:36 -0700754
Ofir Cohena1c2a872011-12-14 10:26:34 +0200755 /* queue in & out requests */
756 gbam_start_endless_rx(port);
757 gbam_start_endless_tx(port);
Jack Phameffd4ae2011-08-03 16:49:36 -0700758
Ofir Cohena1c2a872011-12-14 10:26:34 +0200759 pr_debug("%s: done\n", __func__);
Jack Phameffd4ae2011-08-03 16:49:36 -0700760}
761
/* BAM data channel ready, allow attempt to open */
static int gbam_data_ch_probe(struct platform_device *pdev)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	int i;
	unsigned long flags;

	pr_debug("%s: name:%s\n", __func__, pdev->name);

	/* match the probed platform device against our DMUX channel names */
	for (i = 0; i < n_bam_ports; i++) {
		port = bam_ports[i].port;
		d = &port->data_ch;

		if (!strncmp(bam_ch_names[i], pdev->name,
					BAM_DMUX_CH_NAME_MAX_LEN)) {
			set_bit(BAM_CH_READY, &d->flags);

			/* if usb is online, try opening bam_ch */
			spin_lock_irqsave(&port->port_lock_ul, flags);
			spin_lock(&port->port_lock_dl);
			if (port->port_usb)
				queue_work(gbam_wq, &port->connect_w);
			spin_unlock(&port->port_lock_dl);
			spin_unlock_irqrestore(&port->port_lock_ul, flags);

			break;
		}
	}

	return 0;
}
794
795/* BAM data channel went inactive, so close it */
796static int gbam_data_ch_remove(struct platform_device *pdev)
797{
798 struct gbam_port *port;
799 struct bam_ch_info *d;
800 struct usb_ep *ep_in = NULL;
801 struct usb_ep *ep_out = NULL;
802 unsigned long flags;
803 int i;
804
805 pr_debug("%s: name:%s\n", __func__, pdev->name);
806
807 for (i = 0; i < n_bam_ports; i++) {
808 if (!strncmp(bam_ch_names[i], pdev->name,
809 BAM_DMUX_CH_NAME_MAX_LEN)) {
810 port = bam_ports[i].port;
811 d = &port->data_ch;
812
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530813 spin_lock_irqsave(&port->port_lock_ul, flags);
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -0800814 spin_lock(&port->port_lock_dl);
Jack Phameffd4ae2011-08-03 16:49:36 -0700815 if (port->port_usb) {
816 ep_in = port->port_usb->in;
817 ep_out = port->port_usb->out;
818 }
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -0800819 spin_unlock(&port->port_lock_dl);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530820 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Jack Phameffd4ae2011-08-03 16:49:36 -0700821
822 if (ep_in)
823 usb_ep_fifo_flush(ep_in);
824 if (ep_out)
825 usb_ep_fifo_flush(ep_out);
826
827 gbam_free_buffers(port);
828
829 msm_bam_dmux_close(d->id);
830
Vamsi Krishna7658bd12012-01-13 10:32:00 -0800831 /* bam dmux will free all pending skbs */
832 d->pending_with_bam = 0;
833
Jack Phameffd4ae2011-08-03 16:49:36 -0700834 clear_bit(BAM_CH_READY, &d->flags);
835 clear_bit(BAM_CH_OPENED, &d->flags);
836 }
837 }
838
839 return 0;
840}
841
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700842static void gbam_port_free(int portno)
843{
844 struct gbam_port *port = bam_ports[portno].port;
Jack Phameffd4ae2011-08-03 16:49:36 -0700845 struct platform_driver *pdrv = &bam_ports[portno].pdrv;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700846
Jack Phameffd4ae2011-08-03 16:49:36 -0700847 if (port) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700848 kfree(port);
Jack Phameffd4ae2011-08-03 16:49:36 -0700849 platform_driver_unregister(pdrv);
850 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700851}
852
Ofir Cohena1c2a872011-12-14 10:26:34 +0200853static void gbam2bam_port_free(int portno)
854{
855 struct gbam_port *port = bam2bam_ports[portno];
856
857 kfree(port);
858}
859
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700860static int gbam_port_alloc(int portno)
861{
862 struct gbam_port *port;
863 struct bam_ch_info *d;
Jack Phameffd4ae2011-08-03 16:49:36 -0700864 struct platform_driver *pdrv;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700865
866 port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
867 if (!port)
868 return -ENOMEM;
869
870 port->port_num = portno;
871
872 /* port initialization */
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530873 spin_lock_init(&port->port_lock_ul);
874 spin_lock_init(&port->port_lock_dl);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700875 INIT_WORK(&port->connect_w, gbam_connect_work);
Vamsi Krishna1ad076d2011-11-10 15:03:30 -0800876 INIT_WORK(&port->disconnect_w, gbam_disconnect_work);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700877
878 /* data ch */
879 d = &port->data_ch;
880 d->port = port;
881 INIT_LIST_HEAD(&d->tx_idle);
882 INIT_LIST_HEAD(&d->rx_idle);
883 INIT_WORK(&d->write_tobam_w, gbam_data_write_tobam);
Chiranjeevi Velempatie5105922012-01-19 12:25:26 +0530884 INIT_WORK(&d->write_tohost_w, gbam_write_data_tohost_w);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700885 skb_queue_head_init(&d->tx_skb_q);
886 skb_queue_head_init(&d->rx_skb_q);
887 d->id = bam_ch_ids[portno];
888
889 bam_ports[portno].port = port;
890
Jack Phameffd4ae2011-08-03 16:49:36 -0700891 pdrv = &bam_ports[portno].pdrv;
892 pdrv->probe = gbam_data_ch_probe;
893 pdrv->remove = gbam_data_ch_remove;
894 pdrv->driver.name = bam_ch_names[portno];
895 pdrv->driver.owner = THIS_MODULE;
896
897 platform_driver_register(pdrv);
Ofir Cohena1c2a872011-12-14 10:26:34 +0200898 pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);
899
900 return 0;
901}
902
903static int gbam2bam_port_alloc(int portno)
904{
905 struct gbam_port *port;
906 struct bam_ch_info *d;
907
908 port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
909 if (!port)
910 return -ENOMEM;
911
912 port->port_num = portno;
913
914 /* port initialization */
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530915 spin_lock_init(&port->port_lock_ul);
916 spin_lock_init(&port->port_lock_dl);
Ofir Cohena1c2a872011-12-14 10:26:34 +0200917
918 INIT_WORK(&port->connect_w, gbam2bam_connect_work);
Ofir Cohenfdecb602012-11-16 15:50:01 +0200919 INIT_WORK(&port->disconnect_w, gbam2bam_disconnect_work);
Ofir Cohena1c2a872011-12-14 10:26:34 +0200920
921 /* data ch */
922 d = &port->data_ch;
923 d->port = port;
924 bam2bam_ports[portno] = port;
Jack Phameffd4ae2011-08-03 16:49:36 -0700925
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700926 pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);
927
928 return 0;
929}
930
931#if defined(CONFIG_DEBUG_FS)
932#define DEBUG_BUF_SIZE 1024
/*
 * debugfs read handler for "status": dump per-port packet/drop/queue
 * counters for every regular BAM port into a temporary buffer and copy
 * the result to userspace.
 *
 * Returns the number of bytes copied (via simple_read_from_buffer) or
 * -ENOMEM if the scratch buffer cannot be allocated.
 */
static ssize_t gbam_read_stats(struct file *file, char __user *ubuf,
		size_t count, loff_t *ppos)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	char *buf;
	unsigned long flags;
	int ret;
	int i;
	int temp = 0;	/* running length of formatted output */

	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < n_bam_ports; i++) {
		port = bam_ports[i].port;
		if (!port)
			continue;
		/* hold both UL and DL locks so counters are self-consistent */
		spin_lock_irqsave(&port->port_lock_ul, flags);
		spin_lock(&port->port_lock_dl);

		d = &port->data_ch;

		temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
				"#PORT:%d port:%p data_ch:%p#\n"
				"dpkts_to_usbhost: %lu\n"
				"dpkts_to_modem: %lu\n"
				"dpkts_pwith_bam: %u\n"
				"to_usbhost_dcnt: %u\n"
				"tomodem__dcnt: %u\n"
				"tx_buf_len: %u\n"
				"rx_buf_len: %u\n"
				"data_ch_open: %d\n"
				"data_ch_ready: %d\n",
				i, port, &port->data_ch,
				d->to_host, d->to_modem,
				d->pending_with_bam,
				d->tohost_drp_cnt, d->tomodem_drp_cnt,
				d->tx_skb_q.qlen, d->rx_skb_q.qlen,
				test_bit(BAM_CH_OPENED, &d->flags),
				test_bit(BAM_CH_READY, &d->flags));

		spin_unlock(&port->port_lock_dl);
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
	}

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);

	kfree(buf);

	return ret;
}
986
987static ssize_t gbam_reset_stats(struct file *file, const char __user *buf,
988 size_t count, loff_t *ppos)
989{
990 struct gbam_port *port;
991 struct bam_ch_info *d;
992 int i;
993 unsigned long flags;
994
995 for (i = 0; i < n_bam_ports; i++) {
996 port = bam_ports[i].port;
997 if (!port)
998 continue;
999
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +05301000 spin_lock_irqsave(&port->port_lock_ul, flags);
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -08001001 spin_lock(&port->port_lock_dl);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001002
1003 d = &port->data_ch;
1004
1005 d->to_host = 0;
1006 d->to_modem = 0;
1007 d->pending_with_bam = 0;
1008 d->tohost_drp_cnt = 0;
1009 d->tomodem_drp_cnt = 0;
1010
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -08001011 spin_unlock(&port->port_lock_dl);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +05301012 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001013 }
1014 return count;
1015}
1016
/* debugfs "status" file: read dumps per-port stats, write resets them */
const struct file_operations gbam_stats_ops = {
	.read = gbam_read_stats,
	.write = gbam_reset_stats,
};
1021
1022static void gbam_debugfs_init(void)
1023{
1024 struct dentry *dent;
1025 struct dentry *dfile;
1026
1027 dent = debugfs_create_dir("usb_rmnet", 0);
1028 if (IS_ERR(dent))
1029 return;
1030
1031 /* TODO: Implement cleanup function to remove created file */
1032 dfile = debugfs_create_file("status", 0444, dent, 0, &gbam_stats_ops);
1033 if (!dfile || IS_ERR(dfile))
1034 debugfs_remove(dent);
1035}
1036#else
static void gbam_debugfs_init(void) { }
1038#endif
1039
/*
 * Tear down the data path when the rmnet function is disconnected.
 *
 * @gr:       gadget-side rmnet port (endpoints are still valid here)
 * @port_num: index into bam_ports[] or bam2bam_ports[] depending on trans
 * @trans:    transport type (BAM, BAM2BAM, or BAM2BAM_IPA)
 *
 * Clears port_usb under both UL and DL locks, disables the endpoints,
 * and queues the transport-specific disconnect work. Invalid port
 * numbers and a NULL gr are logged and ignored.
 */
void gbam_disconnect(struct grmnet *gr, u8 port_num, enum transport_type trans)
{
	struct gbam_port *port;
	unsigned long flags;
	struct bam_ch_info *d;

	pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);

	if (trans == USB_GADGET_XPORT_BAM &&
		port_num >= n_bam_ports) {
		pr_err("%s: invalid bam portno#%d\n",
			__func__, port_num);
		return;
	}

	if ((trans == USB_GADGET_XPORT_BAM2BAM ||
		trans == USB_GADGET_XPORT_BAM2BAM_IPA) &&
		port_num >= n_bam2bam_ports) {
		pr_err("%s: invalid bam2bam portno#%d\n",
			__func__, port_num);
		return;
	}

	if (!gr) {
		pr_err("%s: grmnet port is null\n", __func__);
		return;
	}
	if (trans == USB_GADGET_XPORT_BAM)
		port = bam_ports[port_num].port;
	else
		port = bam2bam_ports[port_num];

	d = &port->data_ch;
	/* remember gr so the disconnect work can reach it after port_usb
	 * is cleared below */
	port->gr = gr;

	/* only the dmux transport owns skb buffers to free here */
	if (trans == USB_GADGET_XPORT_BAM)
		gbam_free_buffers(port);

	spin_lock_irqsave(&port->port_lock_ul, flags);
	spin_lock(&port->port_lock_dl);
	port->port_usb = 0;
	n_tx_req_queued = 0;
	spin_unlock(&port->port_lock_dl);
	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	/* disable endpoints */
	usb_ep_disable(gr->out);
	usb_ep_disable(gr->in);

	gr->in->driver_data = NULL;
	gr->out->driver_data = NULL;

	/* NOTE(review): plain BAM2BAM intentionally queues no disconnect
	 * work here — presumably its pipes are torn down elsewhere; confirm
	 * against gbam2bam_disconnect_work's callers. */
	if (trans == USB_GADGET_XPORT_BAM ||
		trans == USB_GADGET_XPORT_BAM2BAM_IPA)
		queue_work(gbam_wq, &port->disconnect_w);
}
1096
/*
 * Bring up the data path when the rmnet function is configured.
 *
 * @gr:             gadget-side rmnet port providing the IN/OUT endpoints
 * @port_num:       index into bam_ports[] or bam2bam_ports[] per trans
 * @trans:          transport type (BAM, BAM2BAM, or BAM2BAM_IPA)
 * @connection_idx: BAM pipe-pair index (used by BAM2BAM/IPA transports only)
 *
 * Enables both endpoints, publishes gr as port_usb under both locks,
 * records transport parameters, then defers the actual channel setup to
 * the port's connect work item.
 *
 * Returns 0 on success, -ENODEV on bad arguments, or the usb_ep_enable()
 * error code.
 */
int gbam_connect(struct grmnet *gr, u8 port_num,
		enum transport_type trans, u8 connection_idx)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	int ret;
	unsigned long flags;

	pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);

	if (trans == USB_GADGET_XPORT_BAM && port_num >= n_bam_ports) {
		pr_err("%s: invalid portno#%d\n", __func__, port_num);
		return -ENODEV;
	}

	if ((trans == USB_GADGET_XPORT_BAM2BAM ||
		trans == USB_GADGET_XPORT_BAM2BAM_IPA)
		&& port_num >= n_bam2bam_ports) {
		pr_err("%s: invalid portno#%d\n", __func__, port_num);
		return -ENODEV;
	}

	if (!gr) {
		pr_err("%s: grmnet port is null\n", __func__);
		return -ENODEV;
	}

	if (trans == USB_GADGET_XPORT_BAM)
		port = bam_ports[port_num].port;
	else
		port = bam2bam_ports[port_num];

	d = &port->data_ch;

	/* enable IN first; on OUT failure roll back IN's driver_data */
	ret = usb_ep_enable(gr->in);
	if (ret) {
		pr_err("%s: usb_ep_enable failed eptype:IN ep:%p",
			__func__, gr->in);
		return ret;
	}
	gr->in->driver_data = port;

	ret = usb_ep_enable(gr->out);
	if (ret) {
		pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p",
			__func__, gr->out);
		gr->in->driver_data = 0;
		return ret;
	}
	gr->out->driver_data = port;

	spin_lock_irqsave(&port->port_lock_ul, flags);
	spin_lock(&port->port_lock_dl);
	port->port_usb = gr;

	/* stats are only meaningful for the dmux transport */
	if (trans == USB_GADGET_XPORT_BAM) {
		d->to_host = 0;
		d->to_modem = 0;
		d->pending_with_bam = 0;
		d->tohost_drp_cnt = 0;
		d->tomodem_drp_cnt = 0;
	}

	spin_unlock(&port->port_lock_dl);
	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	/* record transport-specific pipe parameters for the connect work */
	if (trans == USB_GADGET_XPORT_BAM2BAM) {
		port->gr = gr;
		d->connection_idx = connection_idx;
	} else if (trans == USB_GADGET_XPORT_BAM2BAM_IPA) {
		port->gr = gr;
		d->ipa_params.src_pipe = &(d->src_pipe_idx);
		d->ipa_params.dst_pipe = &(d->dst_pipe_idx);
		d->ipa_params.idx = connection_idx;
	}

	d->trans = trans;
	queue_work(gbam_wq, &port->connect_w);

	return 0;
}
1178
/*
 * One-time setup: allocate the shared workqueue, the requested number of
 * regular BAM ports and BAM2BAM ports, and the debugfs entries.
 *
 * @no_bam_port:     number of dmux BAM ports (<= BAM_N_PORTS)
 * @no_bam2bam_port: number of BAM2BAM ports (<= BAM2BAM_N_PORTS)
 *
 * Returns 0 on success, -EINVAL for bad counts, -ENOMEM or the port
 * allocation error otherwise. On failure all ports allocated so far and
 * the workqueue are released.
 */
int gbam_setup(unsigned int no_bam_port, unsigned int no_bam2bam_port)
{
	int i;
	int ret;

	pr_debug("%s: requested BAM ports:%d and BAM2BAM ports:%d\n",
			__func__, no_bam_port, no_bam2bam_port);

	if ((!no_bam_port && !no_bam2bam_port) || no_bam_port > BAM_N_PORTS
		|| no_bam2bam_port > BAM2BAM_N_PORTS) {
		pr_err("%s: Invalid num of ports count:%d,%d\n",
				__func__, no_bam_port, no_bam2bam_port);
		return -EINVAL;
	}

	gbam_wq = alloc_workqueue("k_gbam", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!gbam_wq) {
		pr_err("%s: Unable to create workqueue gbam_wq\n",
				__func__);
		return -ENOMEM;
	}

	/* counters are bumped before alloc so the error path below frees
	 * exactly the ports attempted so far */
	for (i = 0; i < no_bam_port; i++) {
		n_bam_ports++;
		ret = gbam_port_alloc(i);
		if (ret) {
			n_bam_ports--;
			pr_err("%s: Unable to alloc port:%d\n", __func__, i);
			goto free_bam_ports;
		}
	}

	for (i = 0; i < no_bam2bam_port; i++) {
		n_bam2bam_ports++;
		ret = gbam2bam_port_alloc(i);
		if (ret) {
			n_bam2bam_ports--;
			pr_err("%s: Unable to alloc port:%d\n", __func__, i);
			goto free_bam_ports;
		}
	}
	gbam_debugfs_init();
	return 0;

free_bam_ports:
	/* NOTE(review): n_bam_ports/n_bam2bam_ports stay non-zero after this
	 * cleanup — presumably gbam_setup is never retried after failure;
	 * confirm against callers before relying on re-entry. */
	for (i = 0; i < n_bam_ports; i++)
		gbam_port_free(i);
	for (i = 0; i < n_bam2bam_ports; i++)
		gbam2bam_port_free(i);
	destroy_workqueue(gbam_wq);

	return ret;
}
Amit Blaye5bb35e2012-05-08 20:38:20 +03001232
1233static int gbam_wake_cb(void *param)
1234{
1235 struct gbam_port *port = (struct gbam_port *)param;
1236 struct bam_ch_info *d;
1237 struct f_rmnet *dev;
1238
1239 dev = port_to_rmnet(port->gr);
1240 d = &port->data_ch;
1241
1242 pr_debug("%s: woken up by peer\n", __func__);
1243
1244 return usb_gadget_wakeup(dev->cdev->gadget);
1245}
1246
1247void gbam_suspend(struct grmnet *gr, u8 port_num, enum transport_type trans)
1248{
1249 struct gbam_port *port;
1250 struct bam_ch_info *d;
1251
Ofir Cohenfdecb602012-11-16 15:50:01 +02001252 if (trans != USB_GADGET_XPORT_BAM2BAM &&
1253 trans != USB_GADGET_XPORT_BAM2BAM_IPA)
Amit Blaye5bb35e2012-05-08 20:38:20 +03001254 return;
1255
1256 port = bam2bam_ports[port_num];
1257 d = &port->data_ch;
1258
1259 pr_debug("%s: suspended port %d\n", __func__, port_num);
1260
1261 usb_bam_register_wake_cb(d->connection_idx, gbam_wake_cb, port);
1262}
1263
1264void gbam_resume(struct grmnet *gr, u8 port_num, enum transport_type trans)
1265{
1266 struct gbam_port *port;
1267 struct bam_ch_info *d;
1268
Ofir Cohenfdecb602012-11-16 15:50:01 +02001269 if (trans != USB_GADGET_XPORT_BAM2BAM &&
1270 trans != USB_GADGET_XPORT_BAM2BAM_IPA)
Amit Blaye5bb35e2012-05-08 20:38:20 +03001271 return;
1272
1273 port = bam2bam_ports[port_num];
1274 d = &port->data_ch;
1275
1276 pr_debug("%s: resumed port %d\n", __func__, port_num);
1277
1278 usb_bam_register_wake_cb(d->connection_idx, NULL, NULL);
1279}