/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/termios.h>
#include <mach/msm_smd.h>
#include <linux/netdevice.h>
#include <mach/bam_dmux.h>
#include <linux/debugfs.h>
#include <linux/bitops.h>
#include <linux/termios.h>

#include <mach/usb_gadget_xport.h>
#include <mach/usb_bam.h>

#include "u_rmnet.h"

#define BAM_N_PORTS	1
#define BAM2BAM_N_PORTS	3

static struct workqueue_struct *gbam_wq;
static int n_bam_ports;
static int n_bam2bam_ports;
static unsigned n_tx_req_queued;
static unsigned bam_ch_ids[] = { 8 };

static const char *bam_ch_names[] = { "bam_dmux_ch_8" };

#define BAM_PENDING_LIMIT			220
#define BAM_MUX_TX_PKT_DROP_THRESHOLD		1000
#define BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD		500
#define BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD		300
#define BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT	1

#define BAM_MUX_HDR				8

#define BAM_MUX_RX_Q_SIZE			16
#define BAM_MUX_TX_Q_SIZE			200
#define BAM_MUX_RX_REQ_SIZE			(2048 - BAM_MUX_HDR)

#define DL_INTR_THRESHOLD			20

unsigned int bam_mux_tx_pkt_drop_thld = BAM_MUX_TX_PKT_DROP_THRESHOLD;
module_param(bam_mux_tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_fctrl_en_thld = BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD;
module_param(bam_mux_rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_fctrl_support = BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT;
module_param(bam_mux_rx_fctrl_support, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_fctrl_dis_thld = BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD;
module_param(bam_mux_rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_tx_q_size = BAM_MUX_TX_Q_SIZE;
module_param(bam_mux_tx_q_size, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_q_size = BAM_MUX_RX_Q_SIZE;
module_param(bam_mux_rx_q_size, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_req_size = BAM_MUX_RX_REQ_SIZE;
module_param(bam_mux_rx_req_size, uint, S_IRUGO | S_IWUSR);

unsigned int dl_intr_threshold = DL_INTR_THRESHOLD;
module_param(dl_intr_threshold, uint, S_IRUGO | S_IWUSR);

#define BAM_CH_OPENED	BIT(0)
#define BAM_CH_READY	BIT(1)

struct bam_ch_info {
	unsigned long		flags;
	unsigned		id;

	struct list_head	tx_idle;
	struct sk_buff_head	tx_skb_q;

	struct list_head	rx_idle;
	struct sk_buff_head	rx_skb_q;

	struct gbam_port	*port;
	struct work_struct	write_tobam_w;
	struct work_struct	write_tohost_w;

	struct usb_request	*rx_req;
	struct usb_request	*tx_req;

	u8			src_pipe_idx;
	u8			dst_pipe_idx;
	u8			connection_idx;

	/* stats */
	unsigned int		pending_with_bam;
	unsigned int		tohost_drp_cnt;
	unsigned int		tomodem_drp_cnt;
	unsigned int		tx_len;
	unsigned int		rx_len;
	unsigned long		to_modem;
	unsigned long		to_host;
};

struct gbam_port {
	unsigned		port_num;
	spinlock_t		port_lock_ul;
	spinlock_t		port_lock_dl;

	struct grmnet		*port_usb;
	struct grmnet		*gr;

	struct bam_ch_info	data_ch;

	struct work_struct	connect_w;
	struct work_struct	disconnect_w;
};

static struct bam_portmaster {
	struct gbam_port *port;
	struct platform_driver pdrv;
} bam_ports[BAM_N_PORTS];

struct gbam_port *bam2bam_ports[BAM2BAM_N_PORTS];
static void gbam_start_rx(struct gbam_port *port);
static void gbam_start_endless_rx(struct gbam_port *port);
static void gbam_start_endless_tx(struct gbam_port *port);

/*---------------misc functions---------------- */
static void gbam_free_requests(struct usb_ep *ep, struct list_head *head)
{
	struct usb_request	*req;

	while (!list_empty(head)) {
		req = list_entry(head->next, struct usb_request, list);
		list_del(&req->list);
		usb_ep_free_request(ep, req);
	}
}

static int gbam_alloc_requests(struct usb_ep *ep, struct list_head *head,
		int num,
		void (*cb)(struct usb_ep *ep, struct usb_request *),
		gfp_t flags)
{
	int			i;
	struct usb_request	*req;

	pr_debug("%s: ep:%p head:%p num:%d cb:%p", __func__,
			ep, head, num, cb);

	for (i = 0; i < num; i++) {
		req = usb_ep_alloc_request(ep, flags);
		if (!req) {
			pr_debug("%s: req allocated:%d\n", __func__, i);
			return list_empty(head) ? -ENOMEM : 0;
		}
		req->complete = cb;
		list_add(&req->list, head);
	}

	return 0;
}
/*--------------------------------------------- */

/*------------data_path----------------------------*/
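/*
 * Downlink writer: pairs skbs that gbam_data_recv_cb() queued on tx_skb_q
 * with idle IN requests and hands them to the USB host. Completion
 * interrupts are moderated; only every dl_intr_threshold-th request asks
 * for one.
 */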
static void gbam_write_data_tohost(struct gbam_port *port)
{
	unsigned long		flags;
	struct bam_ch_info	*d = &port->data_ch;
	struct sk_buff		*skb;
	int			ret;
	struct usb_request	*req;
	struct usb_ep		*ep;

	spin_lock_irqsave(&port->port_lock_dl, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock_dl, flags);
		return;
	}

	ep = port->port_usb->in;

	while (!list_empty(&d->tx_idle)) {
		skb = __skb_dequeue(&d->tx_skb_q);
		if (!skb) {
			spin_unlock_irqrestore(&port->port_lock_dl, flags);
			return;
		}
		req = list_first_entry(&d->tx_idle,
				struct usb_request,
				list);
		req->context = skb;
		req->buf = skb->data;
		req->length = skb->len;
		n_tx_req_queued++;
		if (n_tx_req_queued == dl_intr_threshold) {
			req->no_interrupt = 0;
			n_tx_req_queued = 0;
		} else {
			req->no_interrupt = 1;
		}

		list_del(&req->list);

		spin_unlock(&port->port_lock_dl);
		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
		spin_lock(&port->port_lock_dl);
		if (ret) {
			pr_err("%s: usb epIn failed\n", __func__);
			list_add(&req->list, &d->tx_idle);
			dev_kfree_skb_any(skb);
			break;
		}
		d->to_host++;
	}
	spin_unlock_irqrestore(&port->port_lock_dl, flags);
}

static void gbam_write_data_tohost_w(struct work_struct *w)
{
	struct bam_ch_info	*d;
	struct gbam_port	*port;

	d = container_of(w, struct bam_ch_info, write_tohost_w);
	port = d->port;

	gbam_write_data_tohost(port);
}

void gbam_data_recv_cb(void *p, struct sk_buff *skb)
{
	struct gbam_port	*port = p;
	struct bam_ch_info	*d = &port->data_ch;
	unsigned long		flags;

	if (!skb)
		return;

	pr_debug("%s: p:%p#%d d:%p skb_len:%d\n", __func__,
			port, port->port_num, d, skb->len);

	spin_lock_irqsave(&port->port_lock_dl, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock_dl, flags);
		dev_kfree_skb_any(skb);
		return;
	}

	if (d->tx_skb_q.qlen > bam_mux_tx_pkt_drop_thld) {
		d->tohost_drp_cnt++;
		if (printk_ratelimit())
			pr_err("%s: tx pkt dropped: tx_drop_cnt:%u\n",
					__func__, d->tohost_drp_cnt);
		spin_unlock_irqrestore(&port->port_lock_dl, flags);
		dev_kfree_skb_any(skb);
		return;
	}

	__skb_queue_tail(&d->tx_skb_q, skb);
	spin_unlock_irqrestore(&port->port_lock_dl, flags);

	gbam_write_data_tohost(port);
}

void gbam_data_write_done(void *p, struct sk_buff *skb)
{
	struct gbam_port	*port = p;
	struct bam_ch_info	*d = &port->data_ch;
	unsigned long		flags;

	if (!skb)
		return;

	dev_kfree_skb_any(skb);

	spin_lock_irqsave(&port->port_lock_ul, flags);

	d->pending_with_bam--;

	pr_debug("%s: port:%p d:%p tom:%lu pbam:%u, pno:%d\n", __func__,
			port, d, d->to_modem,
			d->pending_with_bam, port->port_num);

	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	queue_work(gbam_wq, &d->write_tobam_w);
}

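/*
 * Uplink writer, run from gbam_wq: drains rx_skb_q into the BAM DMUX
 * channel while keeping at most BAM_PENDING_LIMIT skbs in flight. Once
 * the queue falls below the flow-control disable threshold, USB RX is
 * restarted so the host may send more data.
 */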
static void gbam_data_write_tobam(struct work_struct *w)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	struct sk_buff		*skb;
	unsigned long		flags;
	int			ret;
	int			qlen;

	d = container_of(w, struct bam_ch_info, write_tobam_w);
	port = d->port;

	spin_lock_irqsave(&port->port_lock_ul, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}

	while (d->pending_with_bam < BAM_PENDING_LIMIT) {
		skb = __skb_dequeue(&d->rx_skb_q);
		if (!skb)
			break;

		d->pending_with_bam++;
		d->to_modem++;

		pr_debug("%s: port:%p d:%p tom:%lu pbam:%u pno:%d\n", __func__,
				port, d, d->to_modem, d->pending_with_bam,
				port->port_num);

		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		ret = msm_bam_dmux_write(d->id, skb);
		spin_lock_irqsave(&port->port_lock_ul, flags);
		if (ret) {
			pr_debug("%s: write error:%d\n", __func__, ret);
			d->pending_with_bam--;
			d->to_modem--;
			d->tomodem_drp_cnt++;
			dev_kfree_skb_any(skb);
			break;
		}
	}

	qlen = d->rx_skb_q.qlen;

	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	if (qlen < BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD)
		gbam_start_rx(port);
}
/*-------------------------------------------------------------*/

static void gbam_epin_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gbam_port	*port = ep->driver_data;
	struct bam_ch_info	*d;
	struct sk_buff		*skb = req->context;
	int			status = req->status;

	switch (status) {
	case 0:
		/* successful completion */
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		break;
	default:
		pr_err("%s: data tx ep error %d\n",
				__func__, status);
		break;
	}

	dev_kfree_skb_any(skb);

	if (!port)
		return;

	spin_lock(&port->port_lock_dl);
	d = &port->data_ch;
	list_add_tail(&req->list, &d->tx_idle);
	spin_unlock(&port->port_lock_dl);

	queue_work(gbam_wq, &d->write_tohost_w);
}

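/*
 * OUT endpoint completion: pass the received skb to the write_tobam
 * worker and recycle the request with a fresh skb. If flow control is
 * enabled and rx_skb_q has grown past bam_mux_rx_fctrl_en_thld, the
 * request is parked on rx_idle instead of being requeued, throttling the
 * host until the backlog drains.
 */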
static void
gbam_epout_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gbam_port	*port = ep->driver_data;
	struct bam_ch_info	*d = &port->data_ch;
	struct sk_buff		*skb = req->context;
	int			status = req->status;
	int			queue = 0;

	switch (status) {
	case 0:
		skb_put(skb, req->actual);
		queue = 1;
		break;
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* cable disconnection */
		dev_kfree_skb_any(skb);
		req->buf = 0;
		usb_ep_free_request(ep, req);
		return;
	default:
		if (printk_ratelimit())
			pr_err("%s: %s response error %d, %d/%d\n",
				__func__, ep->name, status,
				req->actual, req->length);
		dev_kfree_skb_any(skb);
		break;
	}

	spin_lock(&port->port_lock_ul);
	if (queue) {
		__skb_queue_tail(&d->rx_skb_q, skb);
		queue_work(gbam_wq, &d->write_tobam_w);
	}

	/* TODO: Handle flow control gracefully by having
	 * a callback mechanism from the bam driver
	 */
	if (bam_mux_rx_fctrl_support &&
		d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld) {

		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock_ul);
		return;
	}
	spin_unlock(&port->port_lock_ul);

	skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
	if (!skb) {
		spin_lock(&port->port_lock_ul);
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock_ul);
		return;
	}
	skb_reserve(skb, BAM_MUX_HDR);

	req->buf = skb->data;
	req->length = bam_mux_rx_req_size;
	req->context = skb;

	status = usb_ep_queue(ep, req, GFP_ATOMIC);
	if (status) {
		dev_kfree_skb_any(skb);

		if (printk_ratelimit())
			pr_err("%s: data rx enqueue err %d\n",
					__func__, status);

		spin_lock(&port->port_lock_ul);
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock_ul);
	}
}

static void gbam_endless_rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	int status = req->status;

	pr_debug("%s status: %d\n", __func__, status);
}

static void gbam_endless_tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	int status = req->status;

	pr_debug("%s status: %d\n", __func__, status);
}

static void gbam_start_rx(struct gbam_port *port)
{
	struct usb_request	*req;
	struct bam_ch_info	*d;
	struct usb_ep		*ep;
	unsigned long		flags;
	int			ret;
	struct sk_buff		*skb;

	spin_lock_irqsave(&port->port_lock_ul, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}

	d = &port->data_ch;
	ep = port->port_usb->out;

	while (port->port_usb && !list_empty(&d->rx_idle)) {

		if (bam_mux_rx_fctrl_support &&
			d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld)
			break;

		req = list_first_entry(&d->rx_idle, struct usb_request, list);

		skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
		if (!skb)
			break;
		skb_reserve(skb, BAM_MUX_HDR);

		list_del(&req->list);
		req->buf = skb->data;
		req->length = bam_mux_rx_req_size;
		req->context = skb;

		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
		spin_lock_irqsave(&port->port_lock_ul, flags);
		if (ret) {
			dev_kfree_skb_any(skb);

			if (printk_ratelimit())
				pr_err("%s: rx queue failed\n", __func__);

			if (port->port_usb)
				list_add(&req->list, &d->rx_idle);
			else
				usb_ep_free_request(ep, req);
			break;
		}
	}
	spin_unlock_irqrestore(&port->port_lock_ul, flags);
}

static void gbam_start_endless_rx(struct gbam_port *port)
{
	struct bam_ch_info *d = &port->data_ch;
	int status;

	status = usb_ep_queue(port->port_usb->out, d->rx_req, GFP_ATOMIC);
	if (status)
		pr_err("%s: error enqueuing transfer, %d\n", __func__, status);
}

static void gbam_start_endless_tx(struct gbam_port *port)
{
	struct bam_ch_info *d = &port->data_ch;
	int status;

	status = usb_ep_queue(port->port_usb->in, d->tx_req, GFP_ATOMIC);
	if (status)
		pr_err("%s: error enqueuing transfer, %d\n", __func__, status);
}

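/*
 * Allocate the OUT (rx_idle) and IN (tx_idle) request pools for the
 * BAM-DMUX transport and prime the OUT endpoint so host data can start
 * flowing.
 */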
static void gbam_start_io(struct gbam_port *port)
{
	unsigned long		flags;
	struct usb_ep		*ep;
	int			ret;
	struct bam_ch_info	*d;

	pr_debug("%s: port:%p\n", __func__, port);

	spin_lock_irqsave(&port->port_lock_ul, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}

	d = &port->data_ch;
	ep = port->port_usb->out;
	ret = gbam_alloc_requests(ep, &d->rx_idle, bam_mux_rx_q_size,
			gbam_epout_complete, GFP_ATOMIC);
	if (ret) {
		pr_err("%s: rx req allocation failed\n", __func__);
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}

	spin_unlock_irqrestore(&port->port_lock_ul, flags);
	spin_lock_irqsave(&port->port_lock_dl, flags);
	ep = port->port_usb->in;
	ret = gbam_alloc_requests(ep, &d->tx_idle, bam_mux_tx_q_size,
			gbam_epin_complete, GFP_ATOMIC);
	if (ret) {
		pr_err("%s: tx req allocation failed\n", __func__);
		spin_unlock_irqrestore(&port->port_lock_dl, flags);
		gbam_free_requests(ep, &d->rx_idle);
		return;
	}

	spin_unlock_irqrestore(&port->port_lock_dl, flags);

	/* queue out requests */
	gbam_start_rx(port);
}

static void gbam_notify(void *p, int event, unsigned long data)
{
	switch (event) {
	case BAM_DMUX_RECEIVE:
		gbam_data_recv_cb(p, (struct sk_buff *)(data));
		break;
	case BAM_DMUX_WRITE_DONE:
		gbam_data_write_done(p, (struct sk_buff *)(data));
		break;
	}
}

static void gbam_free_buffers(struct gbam_port *port)
{
	struct sk_buff		*skb;
	unsigned long		flags;
	struct bam_ch_info	*d;

	if (!port)
		return;

	spin_lock_irqsave(&port->port_lock_ul, flags);
	spin_lock(&port->port_lock_dl);

	if (!port->port_usb)
		goto free_buf_out;

	d = &port->data_ch;

	gbam_free_requests(port->port_usb->in, &d->tx_idle);
	gbam_free_requests(port->port_usb->out, &d->rx_idle);

	while ((skb = __skb_dequeue(&d->tx_skb_q)))
		dev_kfree_skb_any(skb);

	while ((skb = __skb_dequeue(&d->rx_skb_q)))
		dev_kfree_skb_any(skb);

free_buf_out:
	spin_unlock(&port->port_lock_dl);
	spin_unlock_irqrestore(&port->port_lock_ul, flags);
}

static void gbam_disconnect_work(struct work_struct *w)
{
	struct gbam_port *port =
			container_of(w, struct gbam_port, disconnect_w);
	struct bam_ch_info *d = &port->data_ch;

	if (!test_bit(BAM_CH_OPENED, &d->flags))
		return;

	msm_bam_dmux_close(d->id);
	clear_bit(BAM_CH_OPENED, &d->flags);
}

static void gbam2bam_disconnect_work(struct work_struct *w)
{
	struct gbam_port *port =
			container_of(w, struct gbam_port, disconnect_w);
	unsigned long flags;

	spin_lock_irqsave(&port->port_lock_ul, flags);
	spin_lock(&port->port_lock_dl);
	port->port_usb = 0;
	spin_unlock(&port->port_lock_dl);
	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	/* disable endpoints */
	usb_ep_disable(port->gr->out);
	usb_ep_disable(port->gr->in);

	port->gr->in->driver_data = NULL;
	port->gr->out->driver_data = NULL;
}

static void gbam_connect_work(struct work_struct *w)
{
	struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
	struct bam_ch_info *d = &port->data_ch;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&port->port_lock_ul, flags);
	spin_lock(&port->port_lock_dl);
	if (!port->port_usb) {
		spin_unlock(&port->port_lock_dl);
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}
	spin_unlock(&port->port_lock_dl);
	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	if (!test_bit(BAM_CH_READY, &d->flags))
		return;

	ret = msm_bam_dmux_open(d->id, port, gbam_notify);
	if (ret) {
		pr_err("%s: unable open bam ch:%d err:%d\n",
				__func__, d->id, ret);
		return;
	}
	set_bit(BAM_CH_OPENED, &d->flags);

	gbam_start_io(port);

	pr_debug("%s: done\n", __func__);
}

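/*
 * BAM2BAM connect path, run from gbam_wq: enable both endpoints, connect
 * the USB BAM pipes, and queue one "endless" zero-length request per
 * direction. The MSM-specific SPS flags in udc_priv mark these as
 * infinite, hardware-driven transfers, so data moves between the USB
 * controller and the BAM pipes with no per-packet software involvement.
 */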
static void gbam2bam_connect_work(struct work_struct *w)
{
	struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
	struct bam_ch_info *d = &port->data_ch;
	u32 sps_params;
	int ret;
	unsigned long flags;

	ret = usb_ep_enable(port->gr->in);
	if (ret) {
		pr_err("%s: usb_ep_enable failed eptype:IN ep:%p",
				__func__, port->gr->in);
		return;
	}
	port->gr->in->driver_data = port;

	ret = usb_ep_enable(port->gr->out);
	if (ret) {
		pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p",
				__func__, port->gr->out);
		port->gr->in->driver_data = 0;
		return;
	}
	port->gr->out->driver_data = port;
	spin_lock_irqsave(&port->port_lock_ul, flags);
	spin_lock(&port->port_lock_dl);
	port->port_usb = port->gr;
	spin_unlock(&port->port_lock_dl);
	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	ret = usb_bam_connect(d->connection_idx, &d->src_pipe_idx,
			&d->dst_pipe_idx);
	if (ret) {
		pr_err("%s: usb_bam_connect failed: err:%d\n",
				__func__, ret);
		return;
	}

	d->rx_req = usb_ep_alloc_request(port->port_usb->out, GFP_KERNEL);
	if (!d->rx_req)
		return;

	d->rx_req->context = port;
	d->rx_req->complete = gbam_endless_rx_complete;
	d->rx_req->length = 0;
	sps_params = (MSM_SPS_MODE | d->src_pipe_idx |
				MSM_VENDOR_ID) & ~MSM_IS_FINITE_TRANSFER;
	d->rx_req->udc_priv = sps_params;
	d->tx_req = usb_ep_alloc_request(port->port_usb->in, GFP_KERNEL);
	if (!d->tx_req)
		return;

	d->tx_req->context = port;
	d->tx_req->complete = gbam_endless_tx_complete;
	d->tx_req->length = 0;
	sps_params = (MSM_SPS_MODE | d->dst_pipe_idx |
				MSM_VENDOR_ID) & ~MSM_IS_FINITE_TRANSFER;
	d->tx_req->udc_priv = sps_params;

	/* queue in & out requests */
	gbam_start_endless_rx(port);
	gbam_start_endless_tx(port);

	pr_debug("%s: done\n", __func__);
}

/* BAM data channel ready, allow attempt to open */
static int gbam_data_ch_probe(struct platform_device *pdev)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	int			i;
	unsigned long		flags;

	pr_debug("%s: name:%s\n", __func__, pdev->name);

	for (i = 0; i < n_bam_ports; i++) {
		port = bam_ports[i].port;
		d = &port->data_ch;

		if (!strncmp(bam_ch_names[i], pdev->name,
					BAM_DMUX_CH_NAME_MAX_LEN)) {
			set_bit(BAM_CH_READY, &d->flags);

			/* if usb is online, try opening bam_ch */
			spin_lock_irqsave(&port->port_lock_ul, flags);
			spin_lock(&port->port_lock_dl);
			if (port->port_usb)
				queue_work(gbam_wq, &port->connect_w);
			spin_unlock(&port->port_lock_dl);
			spin_unlock_irqrestore(&port->port_lock_ul, flags);

			break;
		}
	}

	return 0;
}

/* BAM data channel went inactive, so close it */
static int gbam_data_ch_remove(struct platform_device *pdev)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	struct usb_ep		*ep_in = NULL;
	struct usb_ep		*ep_out = NULL;
	unsigned long		flags;
	int			i;

	pr_debug("%s: name:%s\n", __func__, pdev->name);

	for (i = 0; i < n_bam_ports; i++) {
		if (!strncmp(bam_ch_names[i], pdev->name,
					BAM_DMUX_CH_NAME_MAX_LEN)) {
			port = bam_ports[i].port;
			d = &port->data_ch;

			spin_lock_irqsave(&port->port_lock_ul, flags);
			spin_lock(&port->port_lock_dl);
			if (port->port_usb) {
				ep_in = port->port_usb->in;
				ep_out = port->port_usb->out;
			}
			spin_unlock(&port->port_lock_dl);
			spin_unlock_irqrestore(&port->port_lock_ul, flags);

			if (ep_in)
				usb_ep_fifo_flush(ep_in);
			if (ep_out)
				usb_ep_fifo_flush(ep_out);

			gbam_free_buffers(port);

			msm_bam_dmux_close(d->id);

			/* bam dmux will free all pending skbs */
			d->pending_with_bam = 0;

			clear_bit(BAM_CH_READY, &d->flags);
			clear_bit(BAM_CH_OPENED, &d->flags);
		}
	}

	return 0;
}

static void gbam_port_free(int portno)
{
	struct gbam_port *port = bam_ports[portno].port;
	struct platform_driver *pdrv = &bam_ports[portno].pdrv;

	if (port) {
		kfree(port);
		platform_driver_unregister(pdrv);
	}
}

static void gbam2bam_port_free(int portno)
{
	struct gbam_port *port = bam2bam_ports[portno];

	kfree(port);
}

static int gbam_port_alloc(int portno)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	struct platform_driver	*pdrv;

	port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->port_num = portno;

	/* port initialization */
	spin_lock_init(&port->port_lock_ul);
	spin_lock_init(&port->port_lock_dl);
	INIT_WORK(&port->connect_w, gbam_connect_work);
	INIT_WORK(&port->disconnect_w, gbam_disconnect_work);

	/* data ch */
	d = &port->data_ch;
	d->port = port;
	INIT_LIST_HEAD(&d->tx_idle);
	INIT_LIST_HEAD(&d->rx_idle);
	INIT_WORK(&d->write_tobam_w, gbam_data_write_tobam);
	INIT_WORK(&d->write_tohost_w, gbam_write_data_tohost_w);
	skb_queue_head_init(&d->tx_skb_q);
	skb_queue_head_init(&d->rx_skb_q);
	d->id = bam_ch_ids[portno];

	bam_ports[portno].port = port;

	pdrv = &bam_ports[portno].pdrv;
	pdrv->probe = gbam_data_ch_probe;
	pdrv->remove = gbam_data_ch_remove;
	pdrv->driver.name = bam_ch_names[portno];
	pdrv->driver.owner = THIS_MODULE;

	platform_driver_register(pdrv);
	pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);

	return 0;
}

static int gbam2bam_port_alloc(int portno)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;

	port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->port_num = portno;

	/* port initialization */
	spin_lock_init(&port->port_lock_ul);
	spin_lock_init(&port->port_lock_dl);

	INIT_WORK(&port->connect_w, gbam2bam_connect_work);
	INIT_WORK(&port->disconnect_w, gbam2bam_disconnect_work);

	/* data ch */
	d = &port->data_ch;
	d->port = port;
	bam2bam_ports[portno] = port;

	pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);

	return 0;
}

#if defined(CONFIG_DEBUG_FS)
#define DEBUG_BUF_SIZE 1024
static ssize_t gbam_read_stats(struct file *file, char __user *ubuf,
		size_t count, loff_t *ppos)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	char			*buf;
	unsigned long		flags;
	int			ret;
	int			i;
	int			temp = 0;

	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < n_bam_ports; i++) {
		port = bam_ports[i].port;
		if (!port)
			continue;
		spin_lock_irqsave(&port->port_lock_ul, flags);
		spin_lock(&port->port_lock_dl);

		d = &port->data_ch;

		temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
				"#PORT:%d port:%p data_ch:%p#\n"
				"dpkts_to_usbhost: %lu\n"
				"dpkts_to_modem: %lu\n"
				"dpkts_pwith_bam: %u\n"
				"to_usbhost_dcnt: %u\n"
				"tomodem__dcnt: %u\n"
				"tx_buf_len: %u\n"
				"rx_buf_len: %u\n"
				"data_ch_open: %d\n"
				"data_ch_ready: %d\n",
				i, port, &port->data_ch,
				d->to_host, d->to_modem,
				d->pending_with_bam,
				d->tohost_drp_cnt, d->tomodem_drp_cnt,
				d->tx_skb_q.qlen, d->rx_skb_q.qlen,
				test_bit(BAM_CH_OPENED, &d->flags),
				test_bit(BAM_CH_READY, &d->flags));

		spin_unlock(&port->port_lock_dl);
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
	}

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);

	kfree(buf);

	return ret;
}

static ssize_t gbam_reset_stats(struct file *file, const char __user *buf,
		size_t count, loff_t *ppos)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	int			i;
	unsigned long		flags;

	for (i = 0; i < n_bam_ports; i++) {
		port = bam_ports[i].port;
		if (!port)
			continue;

		spin_lock_irqsave(&port->port_lock_ul, flags);
		spin_lock(&port->port_lock_dl);

		d = &port->data_ch;

		d->to_host = 0;
		d->to_modem = 0;
		d->pending_with_bam = 0;
		d->tohost_drp_cnt = 0;
		d->tomodem_drp_cnt = 0;

		spin_unlock(&port->port_lock_dl);
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
	}
	return count;
}

const struct file_operations gbam_stats_ops = {
	.read = gbam_read_stats,
	.write = gbam_reset_stats,
};

static void gbam_debugfs_init(void)
{
	struct dentry *dent;
	struct dentry *dfile;

	dent = debugfs_create_dir("usb_rmnet", 0);
	if (IS_ERR(dent))
		return;

	/* TODO: Implement cleanup function to remove created file */
	dfile = debugfs_create_file("status", 0444, dent, 0, &gbam_stats_ops);
	if (!dfile || IS_ERR(dfile))
		debugfs_remove(dent);
}
#else
static void gbam_debugfs_init(void) { }
#endif

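/*
 * Connect/disconnect entry points used by the rmnet gadget function (see
 * u_rmnet.h). They select the BAM or BAM2BAM port for the given transport
 * type and defer the heavy lifting to the port's connect_w/disconnect_w
 * work items on gbam_wq.
 */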
void gbam_disconnect(struct grmnet *gr, u8 port_num, enum transport_type trans)
{
	struct gbam_port	*port;
	unsigned long		flags;
	struct bam_ch_info	*d;

	pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);

	if (trans == USB_GADGET_XPORT_BAM &&
		port_num >= n_bam_ports) {
		pr_err("%s: invalid bam portno#%d\n",
			__func__, port_num);
		return;
	}

	if (trans == USB_GADGET_XPORT_BAM2BAM &&
		port_num >= n_bam2bam_ports) {
		pr_err("%s: invalid bam2bam portno#%d\n",
			__func__, port_num);
		return;
	}

	if (!gr) {
		pr_err("%s: grmnet port is null\n", __func__);
		return;
	}
	if (trans == USB_GADGET_XPORT_BAM)
		port = bam_ports[port_num].port;
	else
		port = bam2bam_ports[port_num];

	d = &port->data_ch;
	port->gr = gr;

	if (trans == USB_GADGET_XPORT_BAM) {
		gbam_free_buffers(port);

		spin_lock_irqsave(&port->port_lock_ul, flags);
		spin_lock(&port->port_lock_dl);
		port->port_usb = 0;
		n_tx_req_queued = 0;
		spin_unlock(&port->port_lock_dl);
		spin_unlock_irqrestore(&port->port_lock_ul, flags);

		/* disable endpoints */
		usb_ep_disable(gr->out);
		usb_ep_disable(gr->in);
	}

	queue_work(gbam_wq, &port->disconnect_w);
}

int gbam_connect(struct grmnet *gr, u8 port_num,
		enum transport_type trans, u8 connection_idx)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	int			ret;
	unsigned long		flags;

	pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);

	if (trans == USB_GADGET_XPORT_BAM && port_num >= n_bam_ports) {
		pr_err("%s: invalid portno#%d\n", __func__, port_num);
		return -ENODEV;
	}

	if (trans == USB_GADGET_XPORT_BAM2BAM && port_num >= n_bam2bam_ports) {
		pr_err("%s: invalid portno#%d\n", __func__, port_num);
		return -ENODEV;
	}

	if (!gr) {
		pr_err("%s: grmnet port is null\n", __func__);
		return -ENODEV;
	}

	if (trans == USB_GADGET_XPORT_BAM)
		port = bam_ports[port_num].port;
	else
		port = bam2bam_ports[port_num];

	d = &port->data_ch;

	if (trans == USB_GADGET_XPORT_BAM) {
		ret = usb_ep_enable(gr->in);
		if (ret) {
			pr_err("%s: usb_ep_enable failed eptype:IN ep:%p",
				__func__, gr->in);
			return ret;
		}
		gr->in->driver_data = port;

		ret = usb_ep_enable(gr->out);
		if (ret) {
			pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p",
				__func__, gr->out);
			gr->in->driver_data = 0;
			return ret;
		}
		gr->out->driver_data = port;

		spin_lock_irqsave(&port->port_lock_ul, flags);
		spin_lock(&port->port_lock_dl);
		port->port_usb = gr;

		d->to_host = 0;
		d->to_modem = 0;
		d->pending_with_bam = 0;
		d->tohost_drp_cnt = 0;
		d->tomodem_drp_cnt = 0;
		spin_unlock(&port->port_lock_dl);
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
	}

	if (trans == USB_GADGET_XPORT_BAM2BAM) {
		port->gr = gr;
		d->connection_idx = connection_idx;
	}

	queue_work(gbam_wq, &port->connect_w);

	return 0;
}

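/*
 * One-time setup: validate the requested port counts, create the gbam_wq
 * workqueue (unbound, max_active 1), allocate the BAM and BAM2BAM port
 * structures, and register the debugfs stats file.
 */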
int gbam_setup(unsigned int no_bam_port, unsigned int no_bam2bam_port)
{
	int	i;
	int	ret;

	pr_debug("%s: requested BAM ports:%d and BAM2BAM ports:%d\n",
			__func__, no_bam_port, no_bam2bam_port);

	if ((!no_bam_port && !no_bam2bam_port) || no_bam_port > BAM_N_PORTS
		|| no_bam2bam_port > BAM2BAM_N_PORTS) {
		pr_err("%s: Invalid num of ports count:%d,%d\n",
				__func__, no_bam_port, no_bam2bam_port);
		return -EINVAL;
	}

	gbam_wq = alloc_workqueue("k_gbam", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!gbam_wq) {
		pr_err("%s: Unable to create workqueue gbam_wq\n",
				__func__);
		return -ENOMEM;
	}

	for (i = 0; i < no_bam_port; i++) {
		n_bam_ports++;
		ret = gbam_port_alloc(i);
		if (ret) {
			n_bam_ports--;
			pr_err("%s: Unable to alloc port:%d\n", __func__, i);
			goto free_bam_ports;
		}
	}

	for (i = 0; i < no_bam2bam_port; i++) {
		n_bam2bam_ports++;
		ret = gbam2bam_port_alloc(i);
		if (ret) {
			n_bam2bam_ports--;
			pr_err("%s: Unable to alloc port:%d\n", __func__, i);
			goto free_bam_ports;
		}
	}
	gbam_debugfs_init();
	return 0;

free_bam_ports:
	for (i = 0; i < n_bam_ports; i++)
		gbam_port_free(i);
	for (i = 0; i < n_bam2bam_ports; i++)
		gbam2bam_port_free(i);
	destroy_workqueue(gbam_wq);

	return ret;
}