/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
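
/*
 * u_bam: glue between a USB rmnet gadget port (struct grmnet) and a
 * BAM DMUX logical data channel towards the modem.  Downlink packets
 * received from BAM are queued on tx_skb_q and written to the USB IN
 * endpoint; uplink packets completing on the USB OUT endpoint are
 * queued on rx_skb_q and drained into BAM from a workqueue.
 */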

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/termios.h>
#include <mach/msm_smd.h>
#include <linux/netdevice.h>
#include <mach/bam_dmux.h>
#include <linux/debugfs.h>
#include <linux/bitops.h>

#include "u_rmnet.h"

#define BAM_N_PORTS 1

static struct workqueue_struct *gbam_wq;
static int n_bam_ports;
static unsigned bam_ch_ids[] = { 8 };

static const char *bam_ch_names[] = { "bam_dmux_ch_8" };

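/*
 * Queue depths and flow-control watermarks for the mux.  Downlink
 * packets are dropped once tx_skb_q exceeds the drop threshold;
 * uplink USB RX is throttled once pending_with_bam crosses the enable
 * watermark and resumed below the disable watermark.  RX requests
 * leave room for the 8-byte BAM mux header.
 */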
#define BAM_MUX_TX_PKT_DROP_THRESHOLD 1000
#define BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD 200
#define BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD 125
#define BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT 1

#define BAM_MUX_HDR 8

#define BAM_MUX_RX_Q_SIZE 16
#define BAM_MUX_TX_Q_SIZE 200
#define BAM_MUX_RX_REQ_SIZE (2048 - BAM_MUX_HDR)

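/*
 * Runtime-tunable copies of the defaults above, exposed as writable
 * module parameters (mode 0644) so the thresholds and queue sizes can
 * be adjusted without rebuilding.
 */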
unsigned int bam_mux_tx_pkt_drop_thld = BAM_MUX_TX_PKT_DROP_THRESHOLD;
module_param(bam_mux_tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_fctrl_en_thld = BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD;
module_param(bam_mux_rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_fctrl_support = BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT;
module_param(bam_mux_rx_fctrl_support, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_fctrl_dis_thld = BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD;
module_param(bam_mux_rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_tx_q_size = BAM_MUX_TX_Q_SIZE;
module_param(bam_mux_tx_q_size, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_q_size = BAM_MUX_RX_Q_SIZE;
module_param(bam_mux_rx_q_size, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_req_size = BAM_MUX_RX_REQ_SIZE;
module_param(bam_mux_rx_req_size, uint, S_IRUGO | S_IWUSR);

#define BAM_CH_OPENED BIT(0)
#define BAM_CH_READY BIT(1)
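
/*
 * BAM_CH_READY is set once the bam_dmux platform device for this
 * channel has probed; BAM_CH_OPENED is set once msm_bam_dmux_open()
 * has succeeded.  I/O runs only while the USB side (port_usb) and the
 * BAM side are both up.
 */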
struct bam_ch_info {
        unsigned long flags;
        unsigned id;

        struct list_head tx_idle;
        struct sk_buff_head tx_skb_q;

        struct list_head rx_idle;
        struct sk_buff_head rx_skb_q;

        struct gbam_port *port;
        struct work_struct write_tobam_w;

        /* stats */
        unsigned int pending_with_bam;
        unsigned int tohost_drp_cnt;
        unsigned int tomodem_drp_cnt;
        unsigned int tx_len;
        unsigned int rx_len;
        unsigned long to_modem;
        unsigned long to_host;
};

struct gbam_port {
        unsigned port_num;
        spinlock_t port_lock;

        struct grmnet *port_usb;

        struct bam_ch_info data_ch;

        struct work_struct connect_w;
};

static struct bam_portmaster {
        struct gbam_port *port;
        struct platform_driver pdrv;
} bam_ports[BAM_N_PORTS];

static void gbam_start_rx(struct gbam_port *port);

/*---------------misc functions---------------- */
static void gbam_free_requests(struct usb_ep *ep, struct list_head *head)
{
        struct usb_request *req;

        while (!list_empty(head)) {
                req = list_entry(head->next, struct usb_request, list);
                list_del(&req->list);
                usb_ep_free_request(ep, req);
        }
}

static int gbam_alloc_requests(struct usb_ep *ep, struct list_head *head,
                int num,
                void (*cb)(struct usb_ep *ep, struct usb_request *),
                gfp_t flags)
{
        int i;
        struct usb_request *req;

        pr_debug("%s: ep:%p head:%p num:%d cb:%p\n", __func__,
                        ep, head, num, cb);

        for (i = 0; i < num; i++) {
                req = usb_ep_alloc_request(ep, flags);
                if (!req) {
                        pr_debug("%s: allocated %d of %d requests\n",
                                        __func__, i, num);
                        return list_empty(head) ? -ENOMEM : 0;
                }
                req->complete = cb;
                list_add(&req->list, head);
        }

        return 0;
}
/*--------------------------------------------- */

/*------------data_path----------------------------*/
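/*
 * Downlink: gbam_data_recv_cb() queues skbs from BAM on tx_skb_q and
 * gbam_write_data_tohost() pairs them with idle IN requests.
 * Uplink: gbam_epout_complete() queues received skbs on rx_skb_q and
 * write_tobam_w drains them into BAM via msm_bam_dmux_write().
 */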
static void gbam_write_data_tohost(struct gbam_port *port)
{
        unsigned long flags;
        struct bam_ch_info *d = &port->data_ch;
        struct sk_buff *skb;
        int ret;
        struct usb_request *req;
        struct usb_ep *ep;

        spin_lock_irqsave(&port->port_lock, flags);
        if (!port->port_usb) {
                spin_unlock_irqrestore(&port->port_lock, flags);
                return;
        }

        ep = port->port_usb->in;

        while (!list_empty(&d->tx_idle)) {
                skb = __skb_dequeue(&d->tx_skb_q);
                if (!skb) {
                        spin_unlock_irqrestore(&port->port_lock, flags);
                        return;
                }
                req = list_first_entry(&d->tx_idle,
                                struct usb_request,
                                list);
                req->context = skb;
                req->buf = skb->data;
                req->length = skb->len;

                list_del(&req->list);

                spin_unlock(&port->port_lock);
                ret = usb_ep_queue(ep, req, GFP_ATOMIC);
                spin_lock(&port->port_lock);
                if (ret) {
                        pr_err("%s: usb epIn failed\n", __func__);
                        list_add(&req->list, &d->tx_idle);
                        dev_kfree_skb_any(skb);
                        break;
                }
                d->to_host++;
        }
        spin_unlock_irqrestore(&port->port_lock, flags);
}

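/* BAM_DMUX_RECEIVE handler: queue a downlink skb towards the host,
 * dropping it if tx_skb_q is already past the drop threshold.
 */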
void gbam_data_recv_cb(void *p, struct sk_buff *skb)
{
        struct gbam_port *port = p;
        struct bam_ch_info *d = &port->data_ch;
        unsigned long flags;

        if (!skb)
                return;

        pr_debug("%s: p:%p#%d d:%p skb_len:%d\n", __func__,
                        port, port->port_num, d, skb->len);

        spin_lock_irqsave(&port->port_lock, flags);
        if (!port->port_usb) {
                spin_unlock_irqrestore(&port->port_lock, flags);
                dev_kfree_skb_any(skb);
                return;
        }

        if (d->tx_skb_q.qlen > bam_mux_tx_pkt_drop_thld) {
                d->tohost_drp_cnt++;
                if (printk_ratelimit())
                        pr_err("%s: tx pkt dropped: tx_drop_cnt:%u\n",
                                        __func__, d->tohost_drp_cnt);
                spin_unlock_irqrestore(&port->port_lock, flags);
                dev_kfree_skb_any(skb);
                return;
        }

        __skb_queue_tail(&d->tx_skb_q, skb);
        spin_unlock_irqrestore(&port->port_lock, flags);

        gbam_write_data_tohost(port);
}

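/*
 * BAM_DMUX_WRITE_DONE handler: BAM has consumed an uplink skb.
 * Restart USB RX unless flow control still holds pending_with_bam at
 * or above the disable watermark.
 */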
void gbam_data_write_done(void *p, struct sk_buff *skb)
{
        struct gbam_port *port = p;
        struct bam_ch_info *d = &port->data_ch;
        unsigned long flags;

        if (!skb)
                return;

        dev_kfree_skb_any(skb);

        spin_lock_irqsave(&port->port_lock, flags);

        d->pending_with_bam--;

        pr_debug("%s: port:%p d:%p tom:%lu pbam:%u, pno:%d\n", __func__,
                        port, d, d->to_modem,
                        d->pending_with_bam, port->port_num);

        if (bam_mux_rx_fctrl_support &&
                        d->pending_with_bam >= bam_mux_rx_fctrl_dis_thld) {
                spin_unlock_irqrestore(&port->port_lock, flags);
                return;
        }
        spin_unlock_irqrestore(&port->port_lock, flags);

        gbam_start_rx(port);
}

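/*
 * Worker: drain rx_skb_q into the BAM channel.  The port lock is
 * dropped around msm_bam_dmux_write() so that the completion path
 * (gbam_data_write_done) can take it.
 */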
static void gbam_data_write_tobam(struct work_struct *w)
{
        struct gbam_port *port;
        struct bam_ch_info *d;
        struct sk_buff *skb;
        unsigned long flags;
        int ret;

        d = container_of(w, struct bam_ch_info, write_tobam_w);
        port = d->port;

        spin_lock_irqsave(&port->port_lock, flags);
        if (!port->port_usb) {
                spin_unlock_irqrestore(&port->port_lock, flags);
                return;
        }

        while ((skb = __skb_dequeue(&d->rx_skb_q))) {
                d->pending_with_bam++;
                d->to_modem++;

                pr_debug("%s: port:%p d:%p tom:%lu pbam:%u pno:%d\n", __func__,
                                port, d, d->to_modem, d->pending_with_bam,
                                port->port_num);

                spin_unlock_irqrestore(&port->port_lock, flags);
                ret = msm_bam_dmux_write(d->id, skb);
                spin_lock_irqsave(&port->port_lock, flags);
                if (ret) {
                        pr_debug("%s: write error:%d\n", __func__, ret);
                        d->pending_with_bam--;
                        d->to_modem--;
                        d->tomodem_drp_cnt++;
                        dev_kfree_skb_any(skb);
                        break;
                }
        }
        spin_unlock_irqrestore(&port->port_lock, flags);
}
/*-------------------------------------------------------------*/

static void gbam_epin_complete(struct usb_ep *ep, struct usb_request *req)
{
        struct gbam_port *port = ep->driver_data;
        struct bam_ch_info *d;
        struct sk_buff *skb = req->context;
        int status = req->status;

        switch (status) {
        case 0:
                /* successful completion */
        case -ECONNRESET:
        case -ESHUTDOWN:
                /* connection gone */
                break;
        default:
                pr_err("%s: data tx ep error %d\n",
                                __func__, status);
                break;
        }

        dev_kfree_skb_any(skb);

        if (!port)
                return;

        spin_lock(&port->port_lock);
        d = &port->data_ch;
        list_add_tail(&req->list, &d->tx_idle);
        spin_unlock(&port->port_lock);

        gbam_write_data_tohost(port);
}

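/*
 * OUT (uplink) completion: on success hand the skb to write_tobam_w,
 * then either park the request on rx_idle (flow control engaged) or
 * re-arm it with a freshly allocated skb.
 */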
static void
gbam_epout_complete(struct usb_ep *ep, struct usb_request *req)
{
        struct gbam_port *port = ep->driver_data;
        struct bam_ch_info *d = &port->data_ch;
        struct sk_buff *skb = req->context;
        int status = req->status;
        int queue = 0;

        switch (status) {
        case 0:
                skb_put(skb, req->actual);
                queue = 1;
                break;
        case -ECONNRESET:
        case -ESHUTDOWN:
                /* cable disconnection */
                dev_kfree_skb_any(skb);
                req->buf = NULL;
                usb_ep_free_request(ep, req);
                return;
        default:
                if (printk_ratelimit())
                        pr_err("%s: %s response error %d, %d/%d\n",
                                        __func__, ep->name, status,
                                        req->actual, req->length);
                dev_kfree_skb_any(skb);
                break;
        }

        spin_lock(&port->port_lock);
        if (queue) {
                __skb_queue_tail(&d->rx_skb_q, skb);
                queue_work(gbam_wq, &d->write_tobam_w);
        }

        /* TODO: handle flow control gracefully by having a callback
         * mechanism from the bam driver
         */
        if (bam_mux_rx_fctrl_support &&
                        d->pending_with_bam >= bam_mux_rx_fctrl_en_thld) {
                list_add_tail(&req->list, &d->rx_idle);
                spin_unlock(&port->port_lock);
                return;
        }
        spin_unlock(&port->port_lock);

        skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
        if (!skb) {
                spin_lock(&port->port_lock);
                list_add_tail(&req->list, &d->rx_idle);
                spin_unlock(&port->port_lock);
                return;
        }
        skb_reserve(skb, BAM_MUX_HDR);

        req->buf = skb->data;
        req->length = bam_mux_rx_req_size;
        req->context = skb;

        status = usb_ep_queue(ep, req, GFP_ATOMIC);
        if (status) {
                dev_kfree_skb_any(skb);

                if (printk_ratelimit())
                        pr_err("%s: data rx enqueue err %d\n",
                                        __func__, status);

                spin_lock(&port->port_lock);
                list_add_tail(&req->list, &d->rx_idle);
                spin_unlock(&port->port_lock);
        }
}

static void gbam_start_rx(struct gbam_port *port)
{
        struct usb_request *req;
        struct bam_ch_info *d;
        struct usb_ep *ep;
        unsigned long flags;
        int ret;
        struct sk_buff *skb;

        spin_lock_irqsave(&port->port_lock, flags);
        if (!port->port_usb) {
                spin_unlock_irqrestore(&port->port_lock, flags);
                return;
        }

        d = &port->data_ch;
        ep = port->port_usb->out;

        while (port->port_usb && !list_empty(&d->rx_idle)) {
                req = list_first_entry(&d->rx_idle, struct usb_request, list);

                skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
                if (!skb)
                        break;
                skb_reserve(skb, BAM_MUX_HDR);

                list_del(&req->list);
                req->buf = skb->data;
                req->length = bam_mux_rx_req_size;
                req->context = skb;

                spin_unlock_irqrestore(&port->port_lock, flags);
                ret = usb_ep_queue(ep, req, GFP_ATOMIC);
                spin_lock_irqsave(&port->port_lock, flags);
                if (ret) {
                        dev_kfree_skb_any(skb);

                        if (printk_ratelimit())
                                pr_err("%s: rx queue failed\n", __func__);

                        if (port->port_usb)
                                list_add(&req->list, &d->rx_idle);
                        else
                                usb_ep_free_request(ep, req);
                        break;
                }
        }
        spin_unlock_irqrestore(&port->port_lock, flags);
}

static void gbam_start_io(struct gbam_port *port)
{
        unsigned long flags;
        struct usb_ep *ep;
        int ret;
        struct bam_ch_info *d;

        pr_debug("%s: port:%p\n", __func__, port);

        spin_lock_irqsave(&port->port_lock, flags);
        if (!port->port_usb) {
                spin_unlock_irqrestore(&port->port_lock, flags);
                return;
        }

        d = &port->data_ch;
        ep = port->port_usb->out;
        ret = gbam_alloc_requests(ep, &d->rx_idle, bam_mux_rx_q_size,
                        gbam_epout_complete, GFP_ATOMIC);
        if (ret) {
                pr_err("%s: rx req allocation failed\n", __func__);
                spin_unlock_irqrestore(&port->port_lock, flags);
                return;
        }

        ep = port->port_usb->in;
        ret = gbam_alloc_requests(ep, &d->tx_idle, bam_mux_tx_q_size,
                        gbam_epin_complete, GFP_ATOMIC);
        if (ret) {
                pr_err("%s: tx req allocation failed\n", __func__);
                gbam_free_requests(port->port_usb->out, &d->rx_idle);
                spin_unlock_irqrestore(&port->port_lock, flags);
                return;
        }

        spin_unlock_irqrestore(&port->port_lock, flags);

        /* queue out requests */
        gbam_start_rx(port);
}

static void gbam_notify(void *p, int event, unsigned long data)
{
        switch (event) {
        case BAM_DMUX_RECEIVE:
                gbam_data_recv_cb(p, (struct sk_buff *)(data));
                break;
        case BAM_DMUX_WRITE_DONE:
                gbam_data_write_done(p, (struct sk_buff *)(data));
                break;
        }
}

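/*
 * Bring-up worker, queued from gbam_connect() and from
 * gbam_data_ch_probe(): opens the BAM channel once BAM_CH_READY is
 * set, then allocates USB requests and starts I/O.
 */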
static void gbam_connect_work(struct work_struct *w)
{
        struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
        struct bam_ch_info *d = &port->data_ch;
        int ret;

        if (!test_bit(BAM_CH_READY, &d->flags))
                return;

        ret = msm_bam_dmux_open(d->id, port, gbam_notify);
        if (ret) {
                pr_err("%s: unable to open bam ch:%d err:%d\n",
                                __func__, d->id, ret);
                return;
        }
        set_bit(BAM_CH_OPENED, &d->flags);

        gbam_start_io(port);

        pr_debug("%s: done\n", __func__);
}

static void gbam_free_buffers(struct gbam_port *port)
{
        struct sk_buff *skb;
        unsigned long flags;
        struct bam_ch_info *d;

        if (!port)
                return;

        spin_lock_irqsave(&port->port_lock, flags);

        if (!port->port_usb)
                goto free_buf_out;

        d = &port->data_ch;

        gbam_free_requests(port->port_usb->in, &d->tx_idle);
        gbam_free_requests(port->port_usb->out, &d->rx_idle);

        while ((skb = __skb_dequeue(&d->tx_skb_q)))
                dev_kfree_skb_any(skb);

        while ((skb = __skb_dequeue(&d->rx_skb_q)))
                dev_kfree_skb_any(skb);

free_buf_out:
        spin_unlock_irqrestore(&port->port_lock, flags);
}

/* BAM data channel ready, allow attempt to open */
static int gbam_data_ch_probe(struct platform_device *pdev)
{
        struct gbam_port *port;
        struct bam_ch_info *d;
        int i;
        unsigned long flags;

        pr_debug("%s: name:%s\n", __func__, pdev->name);

        for (i = 0; i < n_bam_ports; i++) {
                port = bam_ports[i].port;
                d = &port->data_ch;

                if (!strncmp(bam_ch_names[i], pdev->name,
                                        BAM_DMUX_CH_NAME_MAX_LEN)) {
                        set_bit(BAM_CH_READY, &d->flags);

                        /* if usb is online, try opening bam_ch */
                        spin_lock_irqsave(&port->port_lock, flags);
                        if (port->port_usb)
                                queue_work(gbam_wq, &port->connect_w);
                        spin_unlock_irqrestore(&port->port_lock, flags);

                        break;
                }
        }

        return 0;
}

/* BAM data channel went inactive, so close it */
static int gbam_data_ch_remove(struct platform_device *pdev)
{
        struct gbam_port *port;
        struct bam_ch_info *d;
        struct usb_ep *ep_in = NULL;
        struct usb_ep *ep_out = NULL;
        unsigned long flags;
        int i;

        pr_debug("%s: name:%s\n", __func__, pdev->name);

        for (i = 0; i < n_bam_ports; i++) {
                if (!strncmp(bam_ch_names[i], pdev->name,
                                        BAM_DMUX_CH_NAME_MAX_LEN)) {
                        port = bam_ports[i].port;
                        d = &port->data_ch;

                        spin_lock_irqsave(&port->port_lock, flags);
                        if (port->port_usb) {
                                ep_in = port->port_usb->in;
                                ep_out = port->port_usb->out;
                        }
                        spin_unlock_irqrestore(&port->port_lock, flags);

                        if (ep_in)
                                usb_ep_fifo_flush(ep_in);
                        if (ep_out)
                                usb_ep_fifo_flush(ep_out);

                        gbam_free_buffers(port);

                        msm_bam_dmux_close(d->id);

                        clear_bit(BAM_CH_READY, &d->flags);
                        clear_bit(BAM_CH_OPENED, &d->flags);
                }
        }

        return 0;
}

static void gbam_port_free(int portno)
{
        struct gbam_port *port = bam_ports[portno].port;
        struct platform_driver *pdrv = &bam_ports[portno].pdrv;

        if (port) {
                kfree(port);
                platform_driver_unregister(pdrv);
        }
}

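/*
 * Allocate a port and register a platform driver named after its
 * bam_dmux channel; gbam_data_ch_probe() fires when that channel
 * comes up on the modem side.
 */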
static int gbam_port_alloc(int portno)
{
        struct gbam_port *port;
        struct bam_ch_info *d;
        struct platform_driver *pdrv;

        port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
        if (!port)
                return -ENOMEM;

        port->port_num = portno;

        /* port initialization */
        spin_lock_init(&port->port_lock);
        INIT_WORK(&port->connect_w, gbam_connect_work);

        /* data ch */
        d = &port->data_ch;
        d->port = port;
        INIT_LIST_HEAD(&d->tx_idle);
        INIT_LIST_HEAD(&d->rx_idle);
        INIT_WORK(&d->write_tobam_w, gbam_data_write_tobam);
        skb_queue_head_init(&d->tx_skb_q);
        skb_queue_head_init(&d->rx_skb_q);
        d->id = bam_ch_ids[portno];

        bam_ports[portno].port = port;

        pdrv = &bam_ports[portno].pdrv;
        pdrv->probe = gbam_data_ch_probe;
        pdrv->remove = gbam_data_ch_remove;
        pdrv->driver.name = bam_ch_names[portno];
        pdrv->driver.owner = THIS_MODULE;

        platform_driver_register(pdrv);

        pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);

        return 0;
}

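/*
 * debugfs: usb_rmnet/status dumps per-port counters and channel
 * state; writing anything to the file resets the counters.
 */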
#if defined(CONFIG_DEBUG_FS)
#define DEBUG_BUF_SIZE 1024
static ssize_t gbam_read_stats(struct file *file, char __user *ubuf,
                size_t count, loff_t *ppos)
{
        struct gbam_port *port;
        struct bam_ch_info *d;
        char *buf;
        unsigned long flags;
        int ret;
        int i;
        int temp = 0;

        buf = kzalloc(DEBUG_BUF_SIZE, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        for (i = 0; i < n_bam_ports; i++) {
                port = bam_ports[i].port;
                if (!port)
                        continue;
                spin_lock_irqsave(&port->port_lock, flags);

                d = &port->data_ch;

                temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
                                "#PORT:%d port:%p data_ch:%p#\n"
                                "dpkts_to_usbhost: %lu\n"
                                "dpkts_to_modem: %lu\n"
                                "dpkts_pwith_bam: %u\n"
                                "to_usbhost_dcnt: %u\n"
                                "tomodem__dcnt: %u\n"
                                "tx_buf_len: %u\n"
                                "data_ch_open: %d\n"
                                "data_ch_ready: %d\n",
                                i, port, &port->data_ch,
                                d->to_host, d->to_modem,
                                d->pending_with_bam,
                                d->tohost_drp_cnt, d->tomodem_drp_cnt,
                                d->tx_skb_q.qlen,
                                test_bit(BAM_CH_OPENED, &d->flags),
                                test_bit(BAM_CH_READY, &d->flags));

                spin_unlock_irqrestore(&port->port_lock, flags);
        }

        ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);

        kfree(buf);

        return ret;
}

static ssize_t gbam_reset_stats(struct file *file, const char __user *buf,
                size_t count, loff_t *ppos)
{
        struct gbam_port *port;
        struct bam_ch_info *d;
        int i;
        unsigned long flags;

        for (i = 0; i < n_bam_ports; i++) {
                port = bam_ports[i].port;
                if (!port)
                        continue;

                spin_lock_irqsave(&port->port_lock, flags);

                d = &port->data_ch;

                d->to_host = 0;
                d->to_modem = 0;
                d->pending_with_bam = 0;
                d->tohost_drp_cnt = 0;
                d->tomodem_drp_cnt = 0;

                spin_unlock_irqrestore(&port->port_lock, flags);
        }
        return count;
}

static const struct file_operations gbam_stats_ops = {
        .read = gbam_read_stats,
        .write = gbam_reset_stats,
};

static void gbam_debugfs_init(void)
{
        struct dentry *dent;
        struct dentry *dfile;

        dent = debugfs_create_dir("usb_rmnet", NULL);
        if (IS_ERR(dent))
                return;

        /* TODO: Implement cleanup function to remove created file */
        dfile = debugfs_create_file("status", 0444, dent, NULL,
                        &gbam_stats_ops);
        if (!dfile || IS_ERR(dfile))
                debugfs_remove(dent);
}
#else
static void gbam_debugfs_init(void) { }
#endif

void gbam_disconnect(struct grmnet *gr, u8 port_num)
{
        struct gbam_port *port;
        unsigned long flags;
        struct bam_ch_info *d;

        pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);

        if (port_num >= n_bam_ports) {
                pr_err("%s: invalid portno#%d\n", __func__, port_num);
                return;
        }

        if (!gr) {
                pr_err("%s: grmnet port is null\n", __func__);
                return;
        }

        port = bam_ports[port_num].port;
        d = &port->data_ch;

        gbam_free_buffers(port);

        spin_lock_irqsave(&port->port_lock, flags);
        port->port_usb = NULL;
        spin_unlock_irqrestore(&port->port_lock, flags);

        /* disable endpoints */
        usb_ep_disable(gr->out);
        usb_ep_disable(gr->in);

        if (test_bit(BAM_CH_OPENED, &d->flags)) {
                msm_bam_dmux_close(d->id);
                clear_bit(BAM_CH_OPENED, &d->flags);
        }
}

int gbam_connect(struct grmnet *gr, u8 port_num)
{
        struct gbam_port *port;
        struct bam_ch_info *d;
        int ret;
        unsigned long flags;

        pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);

        if (port_num >= n_bam_ports) {
                pr_err("%s: invalid portno#%d\n", __func__, port_num);
                return -ENODEV;
        }

        if (!gr) {
                pr_err("%s: grmnet port is null\n", __func__);
                return -ENODEV;
        }

        port = bam_ports[port_num].port;
        d = &port->data_ch;

        ret = usb_ep_enable(gr->in, gr->in_desc);
        if (ret) {
                pr_err("%s: usb_ep_enable failed eptype:IN ep:%p\n",
                                __func__, gr->in);
                return ret;
        }
        gr->in->driver_data = port;

        ret = usb_ep_enable(gr->out, gr->out_desc);
        if (ret) {
                pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p\n",
                                __func__, gr->out);
                gr->in->driver_data = NULL;
                return ret;
        }
        gr->out->driver_data = port;

        spin_lock_irqsave(&port->port_lock, flags);
        port->port_usb = gr;

        d->to_host = 0;
        d->to_modem = 0;
        d->pending_with_bam = 0;
        d->tohost_drp_cnt = 0;
        d->tomodem_drp_cnt = 0;
        spin_unlock_irqrestore(&port->port_lock, flags);

        queue_work(gbam_wq, &port->connect_w);

        return 0;
}

int gbam_setup(unsigned int count)
{
        int i;
        int ret;

        pr_debug("%s: requested ports:%d\n", __func__, count);

        if (!count || count > BAM_N_PORTS) {
                pr_err("%s: Invalid num of ports count:%d\n",
                                __func__, count);
                return -EINVAL;
        }

        gbam_wq = alloc_workqueue("k_gbam", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
        if (!gbam_wq) {
                pr_err("%s: Unable to create workqueue gbam_wq\n",
                                __func__);
                return -ENOMEM;
        }

        for (i = 0; i < count; i++) {
                n_bam_ports++;
                ret = gbam_port_alloc(i);
                if (ret) {
                        n_bam_ports--;
                        pr_err("%s: Unable to alloc port:%d\n", __func__, i);
                        goto free_bam_ports;
                }
        }

        gbam_debugfs_init();

        return 0;
free_bam_ports:
        for (i = 0; i < n_bam_ports; i++)
                gbam_port_free(i);

        destroy_workqueue(gbam_wq);

        return ret;
}
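
/*
 * Typical call sequence from an rmnet function driver (a sketch only;
 * the actual caller lives in the f_rmnet code that owns the struct
 * grmnet and its endpoints):
 *
 *	gbam_setup(1);			// once, at bind time
 *	...
 *	gbam_connect(gr, 0);		// set_alt: endpoints selected
 *	...
 *	gbam_disconnect(gr, 0);		// disable: cable gone/altsetting 0
 *
 * gbam_connect() enables gr->in/gr->out and defers the BAM channel
 * open to connect_w; gbam_disconnect() quiesces I/O and closes the
 * channel.
 */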
919}