blob: 6299cdd61e9bfa29c5d156c4798f8390de8baf94 [file] [log] [blame]
Anna Perel97b8c222012-01-18 10:08:14 +02001/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/kernel.h>
14#include <linux/interrupt.h>
15#include <linux/device.h>
16#include <linux/delay.h>
17#include <linux/slab.h>
18#include <linux/termios.h>
19#include <mach/msm_smd.h>
20#include <linux/netdevice.h>
21#include <mach/bam_dmux.h>
22#include <linux/debugfs.h>
23#include <linux/bitops.h>
24#include <linux/termios.h>
25
Ofir Cohena1c2a872011-12-14 10:26:34 +020026#include <mach/usb_gadget_xport.h>
27#include <mach/usb_bam.h>
28
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070029#include "u_rmnet.h"
30
31#define BAM_N_PORTS 1
Anna Perel21515162012-02-02 20:50:02 +020032#define BAM2BAM_N_PORTS 3
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070033
34static struct workqueue_struct *gbam_wq;
35static int n_bam_ports;
Ofir Cohena1c2a872011-12-14 10:26:34 +020036static int n_bam2bam_ports;
Vijayavardhan Vennapusa08c31252011-12-21 13:02:49 +053037static unsigned n_tx_req_queued;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070038static unsigned bam_ch_ids[] = { 8 };
39
Jack Phameffd4ae2011-08-03 16:49:36 -070040static const char *bam_ch_names[] = { "bam_dmux_ch_8" };
41
Vamsi Krishna84579552011-11-09 15:33:22 -080042#define BAM_PENDING_LIMIT 220
Vamsi Krishna8f24f252011-11-02 11:46:08 -070043#define BAM_MUX_TX_PKT_DROP_THRESHOLD 1000
Vamsi Krishna84579552011-11-09 15:33:22 -080044#define BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD 500
45#define BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD 300
Vamsi Krishna8f24f252011-11-02 11:46:08 -070046#define BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT 1
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070047
48#define BAM_MUX_HDR 8
49
Vamsi Krishna8f24f252011-11-02 11:46:08 -070050#define BAM_MUX_RX_Q_SIZE 16
51#define BAM_MUX_TX_Q_SIZE 200
52#define BAM_MUX_RX_REQ_SIZE (2048 - BAM_MUX_HDR)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070053
Vijayavardhan Vennapusa08c31252011-12-21 13:02:49 +053054#define DL_INTR_THRESHOLD 20
55
Vamsi Krishna8f24f252011-11-02 11:46:08 -070056unsigned int bam_mux_tx_pkt_drop_thld = BAM_MUX_TX_PKT_DROP_THRESHOLD;
57module_param(bam_mux_tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070058
Vamsi Krishna8f24f252011-11-02 11:46:08 -070059unsigned int bam_mux_rx_fctrl_en_thld = BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD;
60module_param(bam_mux_rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070061
Vamsi Krishna8f24f252011-11-02 11:46:08 -070062unsigned int bam_mux_rx_fctrl_support = BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT;
63module_param(bam_mux_rx_fctrl_support, uint, S_IRUGO | S_IWUSR);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070064
Vamsi Krishna8f24f252011-11-02 11:46:08 -070065unsigned int bam_mux_rx_fctrl_dis_thld = BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD;
66module_param(bam_mux_rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070067
Vamsi Krishna8f24f252011-11-02 11:46:08 -070068unsigned int bam_mux_tx_q_size = BAM_MUX_TX_Q_SIZE;
69module_param(bam_mux_tx_q_size, uint, S_IRUGO | S_IWUSR);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070070
Vamsi Krishna8f24f252011-11-02 11:46:08 -070071unsigned int bam_mux_rx_q_size = BAM_MUX_RX_Q_SIZE;
72module_param(bam_mux_rx_q_size, uint, S_IRUGO | S_IWUSR);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070073
Vamsi Krishna8f24f252011-11-02 11:46:08 -070074unsigned int bam_mux_rx_req_size = BAM_MUX_RX_REQ_SIZE;
75module_param(bam_mux_rx_req_size, uint, S_IRUGO | S_IWUSR);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070076
Vijayavardhan Vennapusa08c31252011-12-21 13:02:49 +053077unsigned int dl_intr_threshold = DL_INTR_THRESHOLD;
78module_param(dl_intr_threshold, uint, S_IRUGO | S_IWUSR);
79
Jack Phameffd4ae2011-08-03 16:49:36 -070080#define BAM_CH_OPENED BIT(0)
81#define BAM_CH_READY BIT(1)
Ofir Cohena1c2a872011-12-14 10:26:34 +020082#define SPS_PARAMS_PIPE_ID_MASK (0x1F)
83#define SPS_PARAMS_SPS_MODE BIT(5)
84#define SPS_PARAMS_TBE BIT(6)
85#define MSM_VENDOR_ID BIT(16)
86
/*
 * Per-port BAM channel state: request pools, skb queues, work items and
 * statistics for one data channel (dmux mode or bam2bam mode).
 */
struct bam_ch_info {
	unsigned long flags;		/* BAM_CH_OPENED / BAM_CH_READY bits */
	unsigned id;			/* bam_dmux logical channel id */

	struct list_head tx_idle;	/* idle IN (to-host) usb_requests */
	struct sk_buff_head tx_skb_q;	/* downlink skbs waiting for the host */

	struct list_head rx_idle;	/* idle OUT (from-host) usb_requests */
	struct sk_buff_head rx_skb_q;	/* uplink skbs waiting for the modem */

	struct gbam_port *port;		/* back-pointer to owning port */
	struct work_struct write_tobam_w;	/* drains rx_skb_q into bam_dmux */
	struct work_struct write_tohost_w;	/* drains tx_skb_q to the IN ep */

	/* bam2bam mode: single "endless" requests and SPS pipe bookkeeping */
	struct usb_request *rx_req;
	struct usb_request *tx_req;

	u8 src_pipe_idx;
	u8 dst_pipe_idx;
	u8 connection_idx;

	/* stats */
	unsigned int pending_with_bam;	/* uplink skbs handed to bam_dmux, not yet acked */
	unsigned int tohost_drp_cnt;	/* downlink drops (tx_skb_q overflow) */
	unsigned int tomodem_drp_cnt;	/* uplink drops (dmux write failures) */
	unsigned int tx_len;
	unsigned int rx_len;
	unsigned long to_modem;		/* total skbs sent toward the modem */
	unsigned long to_host;		/* total skbs queued toward the host */
};
117
/*
 * One logical rmnet data port.  Uplink (OUT/to-modem) state is guarded
 * by port_lock_ul, downlink (IN/to-host) state by port_lock_dl; code
 * that needs both takes ul first, then dl.
 */
struct gbam_port {
	unsigned port_num;
	spinlock_t port_lock_ul;	/* protects rx_idle/rx_skb_q and port_usb reads */
	spinlock_t port_lock_dl;	/* protects tx_idle/tx_skb_q and port_usb reads */

	struct grmnet *port_usb;	/* non-NULL only while the function is connected */
	struct grmnet *gr;		/* gadget-side handle used by bam2bam mode */

	struct bam_ch_info data_ch;

	struct work_struct connect_w;
	struct work_struct disconnect_w;
};
131
/* Registry of dmux-mode ports; one platform driver per bam_dmux channel. */
static struct bam_portmaster {
	struct gbam_port *port;
	struct platform_driver pdrv;
} bam_ports[BAM_N_PORTS];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700136
Ofir Cohena1c2a872011-12-14 10:26:34 +0200137struct gbam_port *bam2bam_ports[BAM2BAM_N_PORTS];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700138static void gbam_start_rx(struct gbam_port *port);
Ofir Cohena1c2a872011-12-14 10:26:34 +0200139static void gbam_start_endless_rx(struct gbam_port *port);
140static void gbam_start_endless_tx(struct gbam_port *port);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700141
142/*---------------misc functions---------------- */
143static void gbam_free_requests(struct usb_ep *ep, struct list_head *head)
144{
145 struct usb_request *req;
146
147 while (!list_empty(head)) {
148 req = list_entry(head->next, struct usb_request, list);
149 list_del(&req->list);
150 usb_ep_free_request(ep, req);
151 }
152}
153
154static int gbam_alloc_requests(struct usb_ep *ep, struct list_head *head,
155 int num,
156 void (*cb)(struct usb_ep *ep, struct usb_request *),
157 gfp_t flags)
158{
159 int i;
160 struct usb_request *req;
161
162 pr_debug("%s: ep:%p head:%p num:%d cb:%p", __func__,
163 ep, head, num, cb);
164
165 for (i = 0; i < num; i++) {
166 req = usb_ep_alloc_request(ep, flags);
167 if (!req) {
168 pr_debug("%s: req allocated:%d\n", __func__, i);
169 return list_empty(head) ? -ENOMEM : 0;
170 }
171 req->complete = cb;
172 list_add(&req->list, head);
173 }
174
175 return 0;
176}
177/*--------------------------------------------- */
178
179/*------------data_path----------------------------*/
/*
 * Drain the downlink queue: pair each skb on tx_skb_q with an idle IN
 * request and submit it to the host.  Stops when either pool runs dry
 * or a queue attempt fails.  Takes port_lock_dl internally.
 */
static void gbam_write_data_tohost(struct gbam_port *port)
{
	unsigned long flags;
	struct bam_ch_info *d = &port->data_ch;
	struct sk_buff *skb;
	int ret;
	struct usb_request *req;
	struct usb_ep *ep;

	spin_lock_irqsave(&port->port_lock_dl, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock_dl, flags);
		return;
	}

	ep = port->port_usb->in;

	while (!list_empty(&d->tx_idle)) {
		skb = __skb_dequeue(&d->tx_skb_q);
		if (!skb) {
			spin_unlock_irqrestore(&port->port_lock_dl, flags);
			return;
		}
		req = list_first_entry(&d->tx_idle,
				struct usb_request,
				list);
		req->context = skb;
		req->buf = skb->data;
		req->length = skb->len;
		/* request a completion IRQ only every dl_intr_threshold
		 * submissions to reduce interrupt load */
		n_tx_req_queued++;
		if (n_tx_req_queued == dl_intr_threshold) {
			req->no_interrupt = 0;
			n_tx_req_queued = 0;
		} else {
			req->no_interrupt = 1;
		}

		list_del(&req->list);

		/* drop only the spinlock (irqs stay off) across the queue call */
		spin_unlock(&port->port_lock_dl);
		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
		spin_lock(&port->port_lock_dl);
		if (ret) {
			pr_err("%s: usb epIn failed\n", __func__);
			/* put the request back; the skb is lost */
			list_add(&req->list, &d->tx_idle);
			dev_kfree_skb_any(skb);
			break;
		}
		d->to_host++;
	}
	spin_unlock_irqrestore(&port->port_lock_dl, flags);
}
232
Chiranjeevi Velempatie5105922012-01-19 12:25:26 +0530233static void gbam_write_data_tohost_w(struct work_struct *w)
234{
235 struct bam_ch_info *d;
236 struct gbam_port *port;
237
238 d = container_of(w, struct bam_ch_info, write_tohost_w);
239 port = d->port;
240
241 gbam_write_data_tohost(port);
242}
243
/*
 * bam_dmux receive callback: a downlink skb arrived from the modem.
 * Queue it on tx_skb_q (dropping it if the queue is over threshold)
 * and try to push pending data to the IN endpoint.
 */
void gbam_data_recv_cb(void *p, struct sk_buff *skb)
{
	struct gbam_port *port = p;
	struct bam_ch_info *d = &port->data_ch;
	unsigned long flags;

	if (!skb)
		return;

	pr_debug("%s: p:%p#%d d:%p skb_len:%d\n", __func__,
			port, port->port_num, d, skb->len);

	spin_lock_irqsave(&port->port_lock_dl, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock_dl, flags);
		dev_kfree_skb_any(skb);
		return;
	}

	/* host is not draining fast enough: drop rather than grow unbounded */
	if (d->tx_skb_q.qlen > bam_mux_tx_pkt_drop_thld) {
		d->tohost_drp_cnt++;
		if (printk_ratelimit())
			pr_err("%s: tx pkt dropped: tx_drop_cnt:%u\n",
					__func__, d->tohost_drp_cnt);
		spin_unlock_irqrestore(&port->port_lock_dl, flags);
		dev_kfree_skb_any(skb);
		return;
	}

	__skb_queue_tail(&d->tx_skb_q, skb);
	spin_unlock_irqrestore(&port->port_lock_dl, flags);

	gbam_write_data_tohost(port);
}
278
/*
 * bam_dmux write-done callback: an uplink skb previously handed to the
 * mux has been consumed.  Free it, decrement the in-flight count and
 * reschedule the uplink writer (which also re-arms RX flow control).
 */
void gbam_data_write_done(void *p, struct sk_buff *skb)
{
	struct gbam_port *port = p;
	struct bam_ch_info *d = &port->data_ch;
	unsigned long flags;

	if (!skb)
		return;

	dev_kfree_skb_any(skb);

	spin_lock_irqsave(&port->port_lock_ul, flags);

	d->pending_with_bam--;

	pr_debug("%s: port:%p d:%p tom:%lu pbam:%u, pno:%d\n", __func__,
			port, d, d->to_modem,
			d->pending_with_bam, port->port_num);

	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	queue_work(gbam_wq, &d->write_tobam_w);
}
302
/*
 * Work item: push uplink skbs from rx_skb_q into bam_dmux, capping the
 * number outstanding at BAM_PENDING_LIMIT.  When the backlog drains
 * below the flow-control threshold, restart USB OUT transfers.
 */
static void gbam_data_write_tobam(struct work_struct *w)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	struct sk_buff *skb;
	unsigned long flags;
	int ret;
	int qlen;

	d = container_of(w, struct bam_ch_info, write_tobam_w);
	port = d->port;

	spin_lock_irqsave(&port->port_lock_ul, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}

	while (d->pending_with_bam < BAM_PENDING_LIMIT) {
		skb = __skb_dequeue(&d->rx_skb_q);
		if (!skb)
			break;

		/* account before dropping the lock so concurrent runs see it */
		d->pending_with_bam++;
		d->to_modem++;

		pr_debug("%s: port:%p d:%p tom:%lu pbam:%u pno:%d\n", __func__,
				port, d, d->to_modem, d->pending_with_bam,
				port->port_num);

		/* call the mux without the spinlock held —
		 * NOTE(review): presumably msm_bam_dmux_write() may block
		 * or allocate; confirm against the dmux driver */
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		ret = msm_bam_dmux_write(d->id, skb);
		spin_lock_irqsave(&port->port_lock_ul, flags);
		if (ret) {
			/* roll back the accounting and drop the packet */
			pr_debug("%s: write error:%d\n", __func__, ret);
			d->pending_with_bam--;
			d->to_modem--;
			d->tomodem_drp_cnt++;
			dev_kfree_skb_any(skb);
			break;
		}
	}

	qlen = d->rx_skb_q.qlen;

	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	/* backlog low enough: resume OUT transfers from the host */
	if (qlen < BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD)
		gbam_start_rx(port);
}
353/*-------------------------------------------------------------*/
354
/*
 * IN (to-host) request completion: free the skb that was transmitted,
 * recycle the request onto tx_idle and schedule further downlink work.
 */
static void gbam_epin_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gbam_port *port = ep->driver_data;
	struct bam_ch_info *d;
	struct sk_buff *skb = req->context;
	int status = req->status;

	switch (status) {
	case 0:
		/* successful completion */
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		break;
	default:
		pr_err("%s: data tx ep error %d\n",
				__func__, status);
		break;
	}

	dev_kfree_skb_any(skb);

	/* ep->driver_data is cleared on disconnect; nothing to recycle then */
	if (!port)
		return;

	spin_lock(&port->port_lock_dl);
	d = &port->data_ch;
	list_add_tail(&req->list, &d->tx_idle);
	spin_unlock(&port->port_lock_dl);

	queue_work(gbam_wq, &d->write_tohost_w);
}
387
/*
 * OUT (from-host) request completion: hand the received skb to the
 * uplink worker, then either park the request (flow control engaged)
 * or re-arm it with a fresh skb.
 */
static void
gbam_epout_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gbam_port *port = ep->driver_data;
	struct bam_ch_info *d = &port->data_ch;
	struct sk_buff *skb = req->context;
	int status = req->status;
	int queue = 0;

	switch (status) {
	case 0:
		/* data received: set the real length before queueing */
		skb_put(skb, req->actual);
		queue = 1;
		break;
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* cable disconnection: release skb and the request itself */
		dev_kfree_skb_any(skb);
		req->buf = 0;
		usb_ep_free_request(ep, req);
		return;
	default:
		if (printk_ratelimit())
			pr_err("%s: %s response error %d, %d/%d\n",
				__func__, ep->name, status,
				req->actual, req->length);
		dev_kfree_skb_any(skb);
		break;
	}

	spin_lock(&port->port_lock_ul);
	if (queue) {
		__skb_queue_tail(&d->rx_skb_q, skb);
		queue_work(gbam_wq, &d->write_tobam_w);
	}

	/* TODO: Handle flow control gracefully by having
	 * having call back mechanism from bam driver
	 */
	if (bam_mux_rx_fctrl_support &&
		d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld) {

		/* uplink backlog too deep: park the request instead of
		 * re-arming; gbam_start_rx() resumes it later */
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock_ul);
		return;
	}
	spin_unlock(&port->port_lock_ul);

	skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
	if (!skb) {
		spin_lock(&port->port_lock_ul);
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock_ul);
		return;
	}
	/* reserve headroom for the mux header prepended later */
	skb_reserve(skb, BAM_MUX_HDR);

	req->buf = skb->data;
	req->length = bam_mux_rx_req_size;
	req->context = skb;

	status = usb_ep_queue(ep, req, GFP_ATOMIC);
	if (status) {
		dev_kfree_skb_any(skb);

		if (printk_ratelimit())
			pr_err("%s: data rx enqueue err %d\n",
					__func__, status);

		spin_lock(&port->port_lock_ul);
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock_ul);
	}
}
462
Ofir Cohena1c2a872011-12-14 10:26:34 +0200463static void gbam_endless_rx_complete(struct usb_ep *ep, struct usb_request *req)
464{
465 int status = req->status;
466
467 pr_debug("%s status: %d\n", __func__, status);
468}
469
470static void gbam_endless_tx_complete(struct usb_ep *ep, struct usb_request *req)
471{
472 int status = req->status;
473
474 pr_debug("%s status: %d\n", __func__, status);
475}
476
/*
 * Arm OUT transfers: attach a fresh skb to each idle OUT request and
 * queue it, honouring the RX flow-control threshold.  Takes
 * port_lock_ul internally.
 */
static void gbam_start_rx(struct gbam_port *port)
{
	struct usb_request *req;
	struct bam_ch_info *d;
	struct usb_ep *ep;
	unsigned long flags;
	int ret;
	struct sk_buff *skb;

	spin_lock_irqsave(&port->port_lock_ul, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}

	d = &port->data_ch;
	ep = port->port_usb->out;

	/* port_usb is re-checked each pass: it can be cleared while the
	 * lock is dropped around usb_ep_queue() below */
	while (port->port_usb && !list_empty(&d->rx_idle)) {

		if (bam_mux_rx_fctrl_support &&
			d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld)
			break;

		req = list_first_entry(&d->rx_idle, struct usb_request, list);

		skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
		if (!skb)
			break;
		/* headroom for the mux header prepended by the dmux layer */
		skb_reserve(skb, BAM_MUX_HDR);

		list_del(&req->list);
		req->buf = skb->data;
		req->length = bam_mux_rx_req_size;
		req->context = skb;

		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
		spin_lock_irqsave(&port->port_lock_ul, flags);
		if (ret) {
			dev_kfree_skb_any(skb);

			if (printk_ratelimit())
				pr_err("%s: rx queue failed\n", __func__);

			/* if the port vanished meanwhile, the pool is gone:
			 * free the request instead of re-parking it */
			if (port->port_usb)
				list_add(&req->list, &d->rx_idle);
			else
				usb_ep_free_request(ep, req);
			break;
		}
	}
	spin_unlock_irqrestore(&port->port_lock_ul, flags);
}
531
Ofir Cohena1c2a872011-12-14 10:26:34 +0200532static void gbam_start_endless_rx(struct gbam_port *port)
533{
534 struct bam_ch_info *d = &port->data_ch;
535 int status;
536
537 status = usb_ep_queue(port->port_usb->out, d->rx_req, GFP_ATOMIC);
538 if (status)
539 pr_err("%s: error enqueuing transfer, %d\n", __func__, status);
540}
541
542static void gbam_start_endless_tx(struct gbam_port *port)
543{
544 struct bam_ch_info *d = &port->data_ch;
545 int status;
546
547 status = usb_ep_queue(port->port_usb->in, d->tx_req, GFP_ATOMIC);
548 if (status)
549 pr_err("%s: error enqueuing transfer, %d\n", __func__, status);
550}
551
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700552static void gbam_start_io(struct gbam_port *port)
553{
554 unsigned long flags;
555 struct usb_ep *ep;
556 int ret;
557 struct bam_ch_info *d;
558
559 pr_debug("%s: port:%p\n", __func__, port);
560
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530561 spin_lock_irqsave(&port->port_lock_ul, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700562 if (!port->port_usb) {
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530563 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700564 return;
565 }
566
567 d = &port->data_ch;
568 ep = port->port_usb->out;
Vamsi Krishna8f24f252011-11-02 11:46:08 -0700569 ret = gbam_alloc_requests(ep, &d->rx_idle, bam_mux_rx_q_size,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700570 gbam_epout_complete, GFP_ATOMIC);
571 if (ret) {
572 pr_err("%s: rx req allocation failed\n", __func__);
573 return;
574 }
575
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530576 spin_unlock_irqrestore(&port->port_lock_ul, flags);
577 spin_lock_irqsave(&port->port_lock_dl, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700578 ep = port->port_usb->in;
Vamsi Krishna8f24f252011-11-02 11:46:08 -0700579 ret = gbam_alloc_requests(ep, &d->tx_idle, bam_mux_tx_q_size,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700580 gbam_epin_complete, GFP_ATOMIC);
581 if (ret) {
582 pr_err("%s: tx req allocation failed\n", __func__);
583 gbam_free_requests(ep, &d->rx_idle);
584 return;
585 }
586
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530587 spin_unlock_irqrestore(&port->port_lock_dl, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700588
589 /* queue out requests */
590 gbam_start_rx(port);
591}
592
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600593static void gbam_notify(void *p, int event, unsigned long data)
594{
595 switch (event) {
596 case BAM_DMUX_RECEIVE:
597 gbam_data_recv_cb(p, (struct sk_buff *)(data));
598 break;
599 case BAM_DMUX_WRITE_DONE:
600 gbam_data_write_done(p, (struct sk_buff *)(data));
601 break;
602 }
603}
604
Ofir Cohena1c2a872011-12-14 10:26:34 +0200605static void gbam_free_buffers(struct gbam_port *port)
606{
607 struct sk_buff *skb;
608 unsigned long flags;
609 struct bam_ch_info *d;
610
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530611 spin_lock_irqsave(&port->port_lock_ul, flags);
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -0800612 spin_lock(&port->port_lock_dl);
Ofir Cohena1c2a872011-12-14 10:26:34 +0200613
614 if (!port || !port->port_usb)
615 goto free_buf_out;
616
617 d = &port->data_ch;
618
619 gbam_free_requests(port->port_usb->in, &d->tx_idle);
620 gbam_free_requests(port->port_usb->out, &d->rx_idle);
621
622 while ((skb = __skb_dequeue(&d->tx_skb_q)))
623 dev_kfree_skb_any(skb);
624
625 while ((skb = __skb_dequeue(&d->rx_skb_q)))
626 dev_kfree_skb_any(skb);
627
628free_buf_out:
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -0800629 spin_unlock(&port->port_lock_dl);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530630 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Ofir Cohena1c2a872011-12-14 10:26:34 +0200631}
632
Vamsi Krishna1ad076d2011-11-10 15:03:30 -0800633static void gbam_disconnect_work(struct work_struct *w)
634{
635 struct gbam_port *port =
636 container_of(w, struct gbam_port, disconnect_w);
637 struct bam_ch_info *d = &port->data_ch;
638
639 if (!test_bit(BAM_CH_OPENED, &d->flags))
640 return;
641
642 msm_bam_dmux_close(d->id);
643 clear_bit(BAM_CH_OPENED, &d->flags);
644}
645
/*
 * bam2bam disconnect work: detach the USB binding under both locks,
 * then quiesce the endpoints.
 */
static void gbam2bam_disconnect_work(struct work_struct *w)
{
	struct gbam_port *port =
		container_of(w, struct gbam_port, disconnect_w);
	unsigned long flags;

	/* lock order: ul first, then dl (matches the rest of the file) */
	spin_lock_irqsave(&port->port_lock_ul, flags);
	spin_lock(&port->port_lock_dl);
	port->port_usb = 0;
	spin_unlock(&port->port_lock_dl);
	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	/* disable endpoints */
	usb_ep_disable(port->gr->out);
	usb_ep_disable(port->gr->in);

	port->gr->in->driver_data = NULL;
	port->gr->out->driver_data = NULL;
}
665
/*
 * Connect work (dmux mode): if the USB side is bound and the BAM
 * channel is ready, open the dmux channel and start I/O.
 */
static void gbam_connect_work(struct work_struct *w)
{
	struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
	struct bam_ch_info *d = &port->data_ch;
	int ret;
	unsigned long flags;

	/* bail early if the function was unbound before the work ran */
	spin_lock_irqsave(&port->port_lock_ul, flags);
	spin_lock(&port->port_lock_dl);
	if (!port->port_usb) {
		spin_unlock(&port->port_lock_dl);
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}
	spin_unlock(&port->port_lock_dl);
	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	/* BAM_CH_READY is set by gbam_data_ch_probe() when the dmux
	 * platform device appears; without it the open would fail */
	if (!test_bit(BAM_CH_READY, &d->flags))
		return;

	ret = msm_bam_dmux_open(d->id, port, gbam_notify);
	if (ret) {
		pr_err("%s: unable open bam ch:%d err:%d\n",
				__func__, d->id, ret);
		return;
	}
	set_bit(BAM_CH_OPENED, &d->flags);

	gbam_start_io(port);

	pr_debug("%s: done\n", __func__);
}
698
/*
 * Connect work (bam2bam mode): enable both endpoints, bind the USB
 * side, connect the SPS pipes and submit the two endless requests.
 *
 * NOTE(review): the error paths after usb_ep_enable() succeed leave
 * the endpoints enabled (and, later, rx_req allocated) — the original
 * code behaves this way; confirm whether disconnect cleans these up.
 */
static void gbam2bam_connect_work(struct work_struct *w)
{
	struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
	struct bam_ch_info *d = &port->data_ch;
	u32 sps_params;
	int ret;
	unsigned long flags;

	ret = usb_ep_enable(port->gr->in, port->gr->in_desc);
	if (ret) {
		pr_err("%s: usb_ep_enable failed eptype:IN ep:%p",
				__func__, port->gr->in);
		return;
	}
	port->gr->in->driver_data = port;

	ret = usb_ep_enable(port->gr->out, port->gr->out_desc);
	if (ret) {
		pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p",
				__func__, port->gr->out);
		/* undo the IN binding before bailing */
		port->gr->in->driver_data = 0;
		return;
	}
	port->gr->out->driver_data = port;

	/* publish the USB binding under both locks */
	spin_lock_irqsave(&port->port_lock_ul, flags);
	spin_lock(&port->port_lock_dl);
	port->port_usb = port->gr;
	spin_unlock(&port->port_lock_dl);
	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	ret = usb_bam_connect(d->connection_idx, &d->src_pipe_idx,
			&d->dst_pipe_idx);
	if (ret) {
		pr_err("%s: usb_bam_connect failed: err:%d\n",
				__func__, ret);
		return;
	}

	/* zero-length "endless" request on the OUT ep; the SPS pipe id
	 * and mode are smuggled to the UDC through udc_priv */
	d->rx_req = usb_ep_alloc_request(port->port_usb->out, GFP_KERNEL);
	if (!d->rx_req)
		return;

	d->rx_req->context = port;
	d->rx_req->complete = gbam_endless_rx_complete;
	d->rx_req->length = 0;
	sps_params = (SPS_PARAMS_SPS_MODE | d->src_pipe_idx |
			MSM_VENDOR_ID) & ~SPS_PARAMS_TBE;
	d->rx_req->udc_priv = sps_params;
	d->tx_req = usb_ep_alloc_request(port->port_usb->in, GFP_KERNEL);
	if (!d->tx_req)
		return;

	d->tx_req->context = port;
	d->tx_req->complete = gbam_endless_tx_complete;
	d->tx_req->length = 0;
	sps_params = (SPS_PARAMS_SPS_MODE | d->dst_pipe_idx |
			MSM_VENDOR_ID) & ~SPS_PARAMS_TBE;
	d->tx_req->udc_priv = sps_params;

	/* queue in & out requests */
	gbam_start_endless_rx(port);
	gbam_start_endless_tx(port);

	pr_debug("%s: done\n", __func__);
}
764
/*
 * BAM data channel ready, allow attempt to open.  Platform-driver probe:
 * the dmux channel named pdev->name is now available; mark the matching
 * port READY and, if USB is already connected, schedule the connect work.
 */
static int gbam_data_ch_probe(struct platform_device *pdev)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	int i;
	unsigned long flags;

	pr_debug("%s: name:%s\n", __func__, pdev->name);

	for (i = 0; i < n_bam_ports; i++) {
		port = bam_ports[i].port;
		d = &port->data_ch;

		if (!strncmp(bam_ch_names[i], pdev->name,
					BAM_DMUX_CH_NAME_MAX_LEN)) {
			set_bit(BAM_CH_READY, &d->flags);

			/* if usb is online, try opening bam_ch */
			spin_lock_irqsave(&port->port_lock_ul, flags);
			spin_lock(&port->port_lock_dl);
			if (port->port_usb)
				queue_work(gbam_wq, &port->connect_w);
			spin_unlock(&port->port_lock_dl);
			spin_unlock_irqrestore(&port->port_lock_ul, flags);

			break;
		}
	}

	return 0;
}
797
798/* BAM data channel went inactive, so close it */
799static int gbam_data_ch_remove(struct platform_device *pdev)
800{
801 struct gbam_port *port;
802 struct bam_ch_info *d;
803 struct usb_ep *ep_in = NULL;
804 struct usb_ep *ep_out = NULL;
805 unsigned long flags;
806 int i;
807
808 pr_debug("%s: name:%s\n", __func__, pdev->name);
809
810 for (i = 0; i < n_bam_ports; i++) {
811 if (!strncmp(bam_ch_names[i], pdev->name,
812 BAM_DMUX_CH_NAME_MAX_LEN)) {
813 port = bam_ports[i].port;
814 d = &port->data_ch;
815
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530816 spin_lock_irqsave(&port->port_lock_ul, flags);
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -0800817 spin_lock(&port->port_lock_dl);
Jack Phameffd4ae2011-08-03 16:49:36 -0700818 if (port->port_usb) {
819 ep_in = port->port_usb->in;
820 ep_out = port->port_usb->out;
821 }
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -0800822 spin_unlock(&port->port_lock_dl);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530823 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Jack Phameffd4ae2011-08-03 16:49:36 -0700824
825 if (ep_in)
826 usb_ep_fifo_flush(ep_in);
827 if (ep_out)
828 usb_ep_fifo_flush(ep_out);
829
830 gbam_free_buffers(port);
831
832 msm_bam_dmux_close(d->id);
833
Vamsi Krishna7658bd12012-01-13 10:32:00 -0800834 /* bam dmux will free all pending skbs */
835 d->pending_with_bam = 0;
836
Jack Phameffd4ae2011-08-03 16:49:36 -0700837 clear_bit(BAM_CH_READY, &d->flags);
838 clear_bit(BAM_CH_OPENED, &d->flags);
839 }
840 }
841
842 return 0;
843}
844
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700845static void gbam_port_free(int portno)
846{
847 struct gbam_port *port = bam_ports[portno].port;
Jack Phameffd4ae2011-08-03 16:49:36 -0700848 struct platform_driver *pdrv = &bam_ports[portno].pdrv;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700849
Jack Phameffd4ae2011-08-03 16:49:36 -0700850 if (port) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700851 kfree(port);
Jack Phameffd4ae2011-08-03 16:49:36 -0700852 platform_driver_unregister(pdrv);
853 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700854}
855
Ofir Cohena1c2a872011-12-14 10:26:34 +0200856static void gbam2bam_port_free(int portno)
857{
858 struct gbam_port *port = bam2bam_ports[portno];
859
860 kfree(port);
861}
862
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700863static int gbam_port_alloc(int portno)
864{
865 struct gbam_port *port;
866 struct bam_ch_info *d;
Jack Phameffd4ae2011-08-03 16:49:36 -0700867 struct platform_driver *pdrv;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700868
869 port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
870 if (!port)
871 return -ENOMEM;
872
873 port->port_num = portno;
874
875 /* port initialization */
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530876 spin_lock_init(&port->port_lock_ul);
877 spin_lock_init(&port->port_lock_dl);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700878 INIT_WORK(&port->connect_w, gbam_connect_work);
Vamsi Krishna1ad076d2011-11-10 15:03:30 -0800879 INIT_WORK(&port->disconnect_w, gbam_disconnect_work);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700880
881 /* data ch */
882 d = &port->data_ch;
883 d->port = port;
884 INIT_LIST_HEAD(&d->tx_idle);
885 INIT_LIST_HEAD(&d->rx_idle);
886 INIT_WORK(&d->write_tobam_w, gbam_data_write_tobam);
Chiranjeevi Velempatie5105922012-01-19 12:25:26 +0530887 INIT_WORK(&d->write_tohost_w, gbam_write_data_tohost_w);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700888 skb_queue_head_init(&d->tx_skb_q);
889 skb_queue_head_init(&d->rx_skb_q);
890 d->id = bam_ch_ids[portno];
891
892 bam_ports[portno].port = port;
893
Jack Phameffd4ae2011-08-03 16:49:36 -0700894 pdrv = &bam_ports[portno].pdrv;
895 pdrv->probe = gbam_data_ch_probe;
896 pdrv->remove = gbam_data_ch_remove;
897 pdrv->driver.name = bam_ch_names[portno];
898 pdrv->driver.owner = THIS_MODULE;
899
900 platform_driver_register(pdrv);
Ofir Cohena1c2a872011-12-14 10:26:34 +0200901 pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);
902
903 return 0;
904}
905
906static int gbam2bam_port_alloc(int portno)
907{
908 struct gbam_port *port;
909 struct bam_ch_info *d;
910
911 port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
912 if (!port)
913 return -ENOMEM;
914
915 port->port_num = portno;
916
917 /* port initialization */
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530918 spin_lock_init(&port->port_lock_ul);
919 spin_lock_init(&port->port_lock_dl);
Ofir Cohena1c2a872011-12-14 10:26:34 +0200920
921 INIT_WORK(&port->connect_w, gbam2bam_connect_work);
922 INIT_WORK(&port->disconnect_w, gbam2bam_disconnect_work);
923
924 /* data ch */
925 d = &port->data_ch;
926 d->port = port;
927 bam2bam_ports[portno] = port;
Jack Phameffd4ae2011-08-03 16:49:36 -0700928
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700929 pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);
930
931 return 0;
932}
933
934#if defined(CONFIG_DEBUG_FS)
935#define DEBUG_BUF_SIZE 1024
936static ssize_t gbam_read_stats(struct file *file, char __user *ubuf,
937 size_t count, loff_t *ppos)
938{
939 struct gbam_port *port;
940 struct bam_ch_info *d;
941 char *buf;
942 unsigned long flags;
943 int ret;
944 int i;
945 int temp = 0;
946
947 buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
948 if (!buf)
949 return -ENOMEM;
950
951 for (i = 0; i < n_bam_ports; i++) {
952 port = bam_ports[i].port;
953 if (!port)
954 continue;
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530955 spin_lock_irqsave(&port->port_lock_ul, flags);
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -0800956 spin_lock(&port->port_lock_dl);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700957
958 d = &port->data_ch;
959
960 temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
961 "#PORT:%d port:%p data_ch:%p#\n"
962 "dpkts_to_usbhost: %lu\n"
963 "dpkts_to_modem: %lu\n"
964 "dpkts_pwith_bam: %u\n"
965 "to_usbhost_dcnt: %u\n"
966 "tomodem__dcnt: %u\n"
967 "tx_buf_len: %u\n"
Vamsi Krishna84579552011-11-09 15:33:22 -0800968 "rx_buf_len: %u\n"
Jack Phameffd4ae2011-08-03 16:49:36 -0700969 "data_ch_open: %d\n"
970 "data_ch_ready: %d\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700971 i, port, &port->data_ch,
972 d->to_host, d->to_modem,
973 d->pending_with_bam,
974 d->tohost_drp_cnt, d->tomodem_drp_cnt,
Vamsi Krishna84579552011-11-09 15:33:22 -0800975 d->tx_skb_q.qlen, d->rx_skb_q.qlen,
Jack Phameffd4ae2011-08-03 16:49:36 -0700976 test_bit(BAM_CH_OPENED, &d->flags),
977 test_bit(BAM_CH_READY, &d->flags));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700978
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -0800979 spin_unlock(&port->port_lock_dl);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530980 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700981 }
982
983 ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);
984
985 kfree(buf);
986
987 return ret;
988}
989
990static ssize_t gbam_reset_stats(struct file *file, const char __user *buf,
991 size_t count, loff_t *ppos)
992{
993 struct gbam_port *port;
994 struct bam_ch_info *d;
995 int i;
996 unsigned long flags;
997
998 for (i = 0; i < n_bam_ports; i++) {
999 port = bam_ports[i].port;
1000 if (!port)
1001 continue;
1002
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +05301003 spin_lock_irqsave(&port->port_lock_ul, flags);
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -08001004 spin_lock(&port->port_lock_dl);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001005
1006 d = &port->data_ch;
1007
1008 d->to_host = 0;
1009 d->to_modem = 0;
1010 d->pending_with_bam = 0;
1011 d->tohost_drp_cnt = 0;
1012 d->tomodem_drp_cnt = 0;
1013
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -08001014 spin_unlock(&port->port_lock_dl);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +05301015 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001016 }
1017 return count;
1018}
1019
/* debugfs "status" file hooks: read dumps per-port statistics,
 * write resets the counters.  Non-static: presumably referenced from
 * another translation unit — NOTE(review): confirm, else make static. */
const struct file_operations gbam_stats_ops = {
	.read = gbam_read_stats,
	.write = gbam_reset_stats,
};
1024
1025static void gbam_debugfs_init(void)
1026{
1027 struct dentry *dent;
1028 struct dentry *dfile;
1029
1030 dent = debugfs_create_dir("usb_rmnet", 0);
1031 if (IS_ERR(dent))
1032 return;
1033
1034 /* TODO: Implement cleanup function to remove created file */
1035 dfile = debugfs_create_file("status", 0444, dent, 0, &gbam_stats_ops);
1036 if (!dfile || IS_ERR(dfile))
1037 debugfs_remove(dent);
1038}
1039#else
/* stub for !CONFIG_DEBUG_FS builds; name must match the caller in
 * gbam_setup() (old code had a "gam_" typo that broke this config) */
static void gbam_debugfs_init(void) { }
1041#endif
1042
Ofir Cohena1c2a872011-12-14 10:26:34 +02001043void gbam_disconnect(struct grmnet *gr, u8 port_num, enum transport_type trans)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001044{
1045 struct gbam_port *port;
1046 unsigned long flags;
1047 struct bam_ch_info *d;
1048
1049 pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);
1050
Ofir Cohena1c2a872011-12-14 10:26:34 +02001051 if (trans == USB_GADGET_XPORT_BAM &&
1052 port_num >= n_bam_ports) {
1053 pr_err("%s: invalid bam portno#%d\n",
1054 __func__, port_num);
1055 return;
1056 }
1057
1058 if (trans == USB_GADGET_XPORT_BAM2BAM &&
1059 port_num >= n_bam2bam_ports) {
1060 pr_err("%s: invalid bam2bam portno#%d\n",
1061 __func__, port_num);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001062 return;
1063 }
1064
1065 if (!gr) {
1066 pr_err("%s: grmnet port is null\n", __func__);
1067 return;
1068 }
Ofir Cohena1c2a872011-12-14 10:26:34 +02001069 if (trans == USB_GADGET_XPORT_BAM)
1070 port = bam_ports[port_num].port;
1071 else
1072 port = bam2bam_ports[port_num];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001073
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001074 d = &port->data_ch;
Ofir Cohena1c2a872011-12-14 10:26:34 +02001075 port->gr = gr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001076
Ofir Cohena1c2a872011-12-14 10:26:34 +02001077 if (trans == USB_GADGET_XPORT_BAM) {
1078 gbam_free_buffers(port);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001079
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +05301080 spin_lock_irqsave(&port->port_lock_ul, flags);
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -08001081 spin_lock(&port->port_lock_dl);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +05301082 port->port_usb = 0;
Vijayavardhan Vennapusa08c31252011-12-21 13:02:49 +05301083 n_tx_req_queued = 0;
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -08001084 spin_unlock(&port->port_lock_dl);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +05301085 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001086
Ofir Cohena1c2a872011-12-14 10:26:34 +02001087 /* disable endpoints */
1088 usb_ep_disable(gr->out);
1089 usb_ep_disable(gr->in);
1090 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001091
Vamsi Krishna1ad076d2011-11-10 15:03:30 -08001092 queue_work(gbam_wq, &port->disconnect_w);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001093}
1094
Ofir Cohena1c2a872011-12-14 10:26:34 +02001095int gbam_connect(struct grmnet *gr, u8 port_num,
1096 enum transport_type trans, u8 connection_idx)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001097{
1098 struct gbam_port *port;
1099 struct bam_ch_info *d;
1100 int ret;
1101 unsigned long flags;
1102
1103 pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);
1104
Ofir Cohena1c2a872011-12-14 10:26:34 +02001105 if (trans == USB_GADGET_XPORT_BAM && port_num >= n_bam_ports) {
1106 pr_err("%s: invalid portno#%d\n", __func__, port_num);
1107 return -ENODEV;
1108 }
1109
1110 if (trans == USB_GADGET_XPORT_BAM2BAM && port_num >= n_bam2bam_ports) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001111 pr_err("%s: invalid portno#%d\n", __func__, port_num);
1112 return -ENODEV;
1113 }
1114
1115 if (!gr) {
1116 pr_err("%s: grmnet port is null\n", __func__);
1117 return -ENODEV;
1118 }
1119
Ofir Cohena1c2a872011-12-14 10:26:34 +02001120 if (trans == USB_GADGET_XPORT_BAM)
1121 port = bam_ports[port_num].port;
1122 else
1123 port = bam2bam_ports[port_num];
1124
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001125 d = &port->data_ch;
1126
Ofir Cohena1c2a872011-12-14 10:26:34 +02001127 if (trans == USB_GADGET_XPORT_BAM) {
David Brownac5d1542012-02-06 10:37:22 -08001128 ret = usb_ep_enable(gr->in, gr->in_desc);
Ofir Cohen4da266f2012-01-03 10:19:29 +02001129 if (ret) {
1130 pr_err("%s: usb_ep_enable failed eptype:IN ep:%p",
1131 __func__, gr->in);
1132 return ret;
1133 }
1134 gr->in->driver_data = port;
1135
David Brownac5d1542012-02-06 10:37:22 -08001136 ret = usb_ep_enable(gr->out, gr->out_desc);
Ofir Cohen4da266f2012-01-03 10:19:29 +02001137 if (ret) {
1138 pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p",
1139 __func__, gr->out);
1140 gr->in->driver_data = 0;
1141 return ret;
1142 }
1143 gr->out->driver_data = port;
1144
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +05301145 spin_lock_irqsave(&port->port_lock_ul, flags);
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -08001146 spin_lock(&port->port_lock_dl);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +05301147 port->port_usb = gr;
Ofir Cohen4da266f2012-01-03 10:19:29 +02001148
Ofir Cohena1c2a872011-12-14 10:26:34 +02001149 d->to_host = 0;
1150 d->to_modem = 0;
1151 d->pending_with_bam = 0;
1152 d->tohost_drp_cnt = 0;
1153 d->tomodem_drp_cnt = 0;
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -08001154 spin_unlock(&port->port_lock_dl);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +05301155 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Ofir Cohena1c2a872011-12-14 10:26:34 +02001156 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001157
Ofir Cohen4da266f2012-01-03 10:19:29 +02001158 if (trans == USB_GADGET_XPORT_BAM2BAM) {
1159 port->gr = gr;
Ofir Cohena1c2a872011-12-14 10:26:34 +02001160 d->connection_idx = connection_idx;
Ofir Cohen4da266f2012-01-03 10:19:29 +02001161 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001162
1163 queue_work(gbam_wq, &port->connect_w);
1164
1165 return 0;
1166}
1167
Ofir Cohena1c2a872011-12-14 10:26:34 +02001168int gbam_setup(unsigned int no_bam_port, unsigned int no_bam2bam_port)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001169{
1170 int i;
1171 int ret;
1172
Ofir Cohena1c2a872011-12-14 10:26:34 +02001173 pr_debug("%s: requested BAM ports:%d and BAM2BAM ports:%d\n",
1174 __func__, no_bam_port, no_bam2bam_port);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001175
Ofir Cohena1c2a872011-12-14 10:26:34 +02001176 if ((!no_bam_port && !no_bam2bam_port) || no_bam_port > BAM_N_PORTS
1177 || no_bam2bam_port > BAM2BAM_N_PORTS) {
1178 pr_err("%s: Invalid num of ports count:%d,%d\n",
1179 __func__, no_bam_port, no_bam2bam_port);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001180 return -EINVAL;
1181 }
1182
1183 gbam_wq = alloc_workqueue("k_gbam", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
1184 if (!gbam_wq) {
1185 pr_err("%s: Unable to create workqueue gbam_wq\n",
1186 __func__);
1187 return -ENOMEM;
1188 }
1189
Ofir Cohena1c2a872011-12-14 10:26:34 +02001190 for (i = 0; i < no_bam_port; i++) {
Manu Gautamd59b5d32011-09-09 14:47:08 +05301191 n_bam_ports++;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001192 ret = gbam_port_alloc(i);
1193 if (ret) {
Manu Gautamd59b5d32011-09-09 14:47:08 +05301194 n_bam_ports--;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001195 pr_err("%s: Unable to alloc port:%d\n", __func__, i);
1196 goto free_bam_ports;
1197 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001198 }
1199
Ofir Cohena1c2a872011-12-14 10:26:34 +02001200 for (i = 0; i < no_bam2bam_port; i++) {
1201 n_bam2bam_ports++;
1202 ret = gbam2bam_port_alloc(i);
1203 if (ret) {
1204 n_bam2bam_ports--;
1205 pr_err("%s: Unable to alloc port:%d\n", __func__, i);
1206 goto free_bam_ports;
1207 }
1208 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001209 gbam_debugfs_init();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001210 return 0;
Ofir Cohena1c2a872011-12-14 10:26:34 +02001211
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001212free_bam_ports:
1213 for (i = 0; i < n_bam_ports; i++)
1214 gbam_port_free(i);
Ofir Cohena1c2a872011-12-14 10:26:34 +02001215 for (i = 0; i < n_bam2bam_ports; i++)
1216 gbam2bam_port_free(i);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001217 destroy_workqueue(gbam_wq);
1218
1219 return ret;
1220}