/*
 * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/usb.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/usb/cdc.h>
#include <linux/wait.h>
#include <linux/if_ether.h>
#include <linux/pm_runtime.h>

#include "gdm_usb.h"
#include "gdm_lte.h"
#include "hci.h"
#include "hci_packet.h"
#include "gdm_endian.h"

#define USB_DEVICE_CDC_DATA(vid, pid) \
	.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
		       USB_DEVICE_ID_MATCH_INT_CLASS | \
		       USB_DEVICE_ID_MATCH_INT_SUBCLASS, \
	.idVendor = vid, \
	.idProduct = pid, \
	.bInterfaceClass = USB_CLASS_COMM, \
	.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET

#define USB_DEVICE_MASS_DATA(vid, pid) \
	.match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, \
	.idVendor = vid, \
	.idProduct = pid, \
	.bInterfaceSubClass = USB_SC_SCSI, \
	.bInterfaceClass = USB_CLASS_MASS_STORAGE, \
	.bInterfaceProtocol = USB_PR_BULK

static const struct usb_device_id id_table[] = {
	{ USB_DEVICE_CDC_DATA(VID_GCT, PID_GDM7240) }, /* GCT GDM7240 */
	{ USB_DEVICE_CDC_DATA(VID_GCT, PID_GDM7243) }, /* GCT GDM7243 */
	{ }
};

MODULE_DEVICE_TABLE(usb, id_table);

static struct workqueue_struct *usb_tx_wq;
static struct workqueue_struct *usb_rx_wq;

static void do_tx(struct work_struct *work);
static void do_rx(struct work_struct *work);

static int gdm_usb_recv(void *priv_dev,
			int (*cb)(void *cb_data, void *data, int len, int context),
			void *cb_data,
			int context);

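/*
 * Ask the modem for its MAC address by sending an LTE_GET_INFORMATION
 * HCI request on the bulk-out endpoint; the reply arrives later through
 * the RX path and is handled in set_mac_address().
 */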
static int request_mac_address(struct lte_udev *udev)
{
	u8 buf[16] = {0,};
	struct hci_packet *hci = (struct hci_packet *)buf;
	struct usb_device *usbdev = udev->usbdev;
	int actual;
	int ret = -1;

	hci->cmd_evt = gdm_cpu_to_dev16(&udev->gdm_ed, LTE_GET_INFORMATION);
	hci->len = gdm_cpu_to_dev16(&udev->gdm_ed, 1);
	hci->data[0] = MAC_ADDRESS;

	ret = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 2), buf, 5,
			   &actual, 1000);

	udev->request_mac_addr = 1;

	return ret;
}

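/*
 * Allocate a TX descriptor together with its URB and data buffer.  When
 * the requested length is a multiple of 512 it is grown by one byte,
 * matching the padding applied in send_tx_packet() so a bulk transfer
 * never ends exactly on a 512-byte packet boundary.
 */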
static struct usb_tx *alloc_tx_struct(int len)
{
	struct usb_tx *t = NULL;
	int ret = 0;

	t = kmalloc(sizeof(struct usb_tx), GFP_ATOMIC);
	if (!t) {
		ret = -ENOMEM;
		goto out;
	}
	memset(t, 0, sizeof(struct usb_tx));

	t->urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!(len % 512))
		len++;

	t->buf = kmalloc(len, GFP_ATOMIC);
	if (!t->urb || !t->buf) {
		ret = -ENOMEM;
		goto out;
	}

out:
	if (ret < 0) {
		if (t) {
			usb_free_urb(t->urb);
			kfree(t->buf);
			kfree(t);
		}
		return NULL;
	}

	return t;
}

static struct usb_tx_sdu *alloc_tx_sdu_struct(void)
{
	struct usb_tx_sdu *t_sdu = NULL;
	int ret = 0;

	t_sdu = kmalloc(sizeof(struct usb_tx_sdu), GFP_ATOMIC);
	if (!t_sdu) {
		ret = -ENOMEM;
		goto out;
	}
	memset(t_sdu, 0, sizeof(struct usb_tx_sdu));

	t_sdu->buf = kmalloc(SDU_BUF_SIZE, GFP_ATOMIC);
	if (!t_sdu->buf) {
		ret = -ENOMEM;
		goto out;
	}
out:
	if (ret < 0) {
		if (t_sdu) {
			kfree(t_sdu->buf);
			kfree(t_sdu);
		}
		return NULL;
	}

	return t_sdu;
}

static void free_tx_struct(struct usb_tx *t)
{
	if (t) {
		usb_free_urb(t->urb);
		kfree(t->buf);
		kfree(t);
	}
}

static void free_tx_sdu_struct(struct usb_tx_sdu *t_sdu)
{
	if (t_sdu) {
		kfree(t_sdu->buf);
		kfree(t_sdu);
	}
}

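/*
 * TX SDU descriptors live on a preallocated free list protected by
 * tx->lock (taken by the callers).  get_tx_sdu_struct() hands one out and
 * reports through *no_spc whether the pool is now exhausted;
 * put_tx_struct() returns a descriptor to the pool.
 */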
static struct usb_tx_sdu *get_tx_sdu_struct(struct tx_cxt *tx, int *no_spc)
{
	struct usb_tx_sdu *t_sdu;

	if (list_empty(&tx->free_list))
		return NULL;

	t_sdu = list_entry(tx->free_list.next, struct usb_tx_sdu, list);
	list_del(&t_sdu->list);

	tx->avail_count--;

	*no_spc = list_empty(&tx->free_list) ? 1 : 0;

	return t_sdu;
}

static void put_tx_struct(struct tx_cxt *tx, struct usb_tx_sdu *t_sdu)
{
	list_add_tail(&t_sdu->list, &tx->free_list);
	tx->avail_count++;
}

static struct usb_rx *alloc_rx_struct(void)
{
	struct usb_rx *r = NULL;
	int ret = 0;

	r = kmalloc(sizeof(struct usb_rx), GFP_ATOMIC);
	if (!r) {
		ret = -ENOMEM;
		goto out;
	}

	r->urb = usb_alloc_urb(0, GFP_ATOMIC);
	r->buf = kmalloc(RX_BUF_SIZE, GFP_ATOMIC);
	if (!r->urb || !r->buf) {
		ret = -ENOMEM;
		goto out;
	}
out:
	if (ret < 0) {
		if (r) {
			usb_free_urb(r->urb);
			kfree(r->buf);
			kfree(r);
		}
		return NULL;
	}

	return r;
}

static void free_rx_struct(struct usb_rx *r)
{
	if (r) {
		usb_free_urb(r->urb);
		kfree(r->buf);
		kfree(r);
	}
}

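/*
 * RX descriptors are pooled on rx->free_list.  Unlike the TX helpers,
 * these take rx->rx_lock themselves; *no_spc signals that the last free
 * descriptor has just been handed out.
 */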
static struct usb_rx *get_rx_struct(struct rx_cxt *rx, int *no_spc)
{
	struct usb_rx *r;
	unsigned long flags;

	spin_lock_irqsave(&rx->rx_lock, flags);

	if (list_empty(&rx->free_list)) {
		spin_unlock_irqrestore(&rx->rx_lock, flags);
		return NULL;
	}

	r = list_entry(rx->free_list.next, struct usb_rx, free_list);
	list_del(&r->free_list);

	rx->avail_count--;

	*no_spc = list_empty(&rx->free_list) ? 1 : 0;

	spin_unlock_irqrestore(&rx->rx_lock, flags);

	return r;
}

static void put_rx_struct(struct rx_cxt *rx, struct usb_rx *r)
{
	unsigned long flags;

	spin_lock_irqsave(&rx->rx_lock, flags);

	list_add_tail(&r->free_list, &rx->free_list);
	rx->avail_count++;

	spin_unlock_irqrestore(&rx->rx_lock, flags);
}

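/*
 * Tear down everything queued on the TX and RX contexts: free pending and
 * pooled TX descriptors, kill any still-submitted RX URBs, and release the
 * RX descriptors that are free or waiting to be delivered to the host.
 */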
static void release_usb(struct lte_udev *udev)
{
	struct rx_cxt *rx = &udev->rx;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx *t, *t_next;
	struct usb_rx *r, *r_next;
	struct usb_tx_sdu *t_sdu, *t_sdu_next;
	unsigned long flags;

	spin_lock_irqsave(&tx->lock, flags);
	list_for_each_entry_safe(t_sdu, t_sdu_next, &tx->sdu_list, list) {
		list_del(&t_sdu->list);
		free_tx_sdu_struct(t_sdu);
	}

	list_for_each_entry_safe(t, t_next, &tx->hci_list, list) {
		list_del(&t->list);
		free_tx_struct(t);
	}

	list_for_each_entry_safe(t_sdu, t_sdu_next, &tx->free_list, list) {
		list_del(&t_sdu->list);
		free_tx_sdu_struct(t_sdu);
	}
	spin_unlock_irqrestore(&tx->lock, flags);

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
				 rx_submit_list) {
		/* usb_kill_urb() may sleep, so drop the lock around it */
		spin_unlock_irqrestore(&rx->submit_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_lock, flags);
	}
	spin_unlock_irqrestore(&rx->submit_lock, flags);

	spin_lock_irqsave(&rx->rx_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->free_list, free_list) {
		list_del(&r->free_list);
		free_rx_struct(r);
	}
	spin_unlock_irqrestore(&rx->rx_lock, flags);

	spin_lock_irqsave(&rx->to_host_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->to_host_list, to_host_list) {
		if (r->index == (void *)udev) {
			list_del(&r->to_host_list);
			free_rx_struct(r);
		}
	}
	spin_unlock_irqrestore(&rx->to_host_lock, flags);
}

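/*
 * Initialise the TX/RX contexts: set up the lists, locks and work items,
 * and preallocate MAX_NUM_SDU_BUF TX SDU descriptors plus twice
 * MAX_RX_SUBMIT_COUNT RX descriptors.  On failure the caller cleans up
 * through release_usb().
 */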
static int init_usb(struct lte_udev *udev)
{
	int ret = 0;
	int i;
	struct tx_cxt *tx = &udev->tx;
	struct rx_cxt *rx = &udev->rx;
	struct usb_tx_sdu *t_sdu = NULL;
	struct usb_rx *r = NULL;

	udev->send_complete = 1;
	udev->tx_stop = 0;
	udev->request_mac_addr = 0;
	udev->usb_state = PM_NORMAL;

	INIT_LIST_HEAD(&tx->sdu_list);
	INIT_LIST_HEAD(&tx->hci_list);
	INIT_LIST_HEAD(&tx->free_list);
	INIT_LIST_HEAD(&rx->rx_submit_list);
	INIT_LIST_HEAD(&rx->free_list);
	INIT_LIST_HEAD(&rx->to_host_list);
	spin_lock_init(&tx->lock);
	spin_lock_init(&rx->rx_lock);
	spin_lock_init(&rx->submit_lock);
	spin_lock_init(&rx->to_host_lock);

	tx->avail_count = 0;
	rx->avail_count = 0;

	udev->rx_cb = NULL;

	for (i = 0; i < MAX_NUM_SDU_BUF; i++) {
		t_sdu = alloc_tx_sdu_struct();
		if (t_sdu == NULL) {
			ret = -ENOMEM;
			goto fail;
		}

		list_add(&t_sdu->list, &tx->free_list);
		tx->avail_count++;
	}

	for (i = 0; i < MAX_RX_SUBMIT_COUNT * 2; i++) {
		r = alloc_rx_struct();
		if (r == NULL) {
			ret = -ENOMEM;
			goto fail;
		}

		list_add(&r->free_list, &rx->free_list);
		rx->avail_count++;
	}
	INIT_DELAYED_WORK(&udev->work_tx, do_tx);
	INIT_DELAYED_WORK(&udev->work_rx, do_rx);
	return 0;
fail:
	return ret;
}

static int set_mac_address(u8 *data, void *arg)
{
	struct phy_dev *phy_dev = (struct phy_dev *)arg;
	struct lte_udev *udev = phy_dev->priv_dev;
	struct tlv *tlv = (struct tlv *)data;
	u8 mac_address[ETH_ALEN] = {0, };

	if (tlv->type == MAC_ADDRESS && udev->request_mac_addr) {
		memcpy(mac_address, tlv->data, tlv->len);

		if (register_lte_device(phy_dev, &udev->intf->dev,
					mac_address) < 0)
			pr_err("register lte device failed\n");

		udev->request_mac_addr = 0;

		return 1;
	}

	return 0;
}

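/*
 * RX work item: drain rx->to_host_list, dispatch each completed buffer to
 * the registered callback (handling the MAC address reply specially), and
 * resubmit a receive request for every buffer that was consumed.
 */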
static void do_rx(struct work_struct *work)
{
	struct lte_udev *udev =
		container_of(work, struct lte_udev, work_rx.work);
	struct rx_cxt *rx = &udev->rx;
	struct usb_rx *r;
	struct hci_packet *hci;
	struct phy_dev *phy_dev;
	u16 cmd_evt;
	int ret;
	unsigned long flags;

	while (1) {
		spin_lock_irqsave(&rx->to_host_lock, flags);
		if (list_empty(&rx->to_host_list)) {
			spin_unlock_irqrestore(&rx->to_host_lock, flags);
			break;
		}
		r = list_entry(rx->to_host_list.next,
			       struct usb_rx, to_host_list);
		list_del(&r->to_host_list);
		spin_unlock_irqrestore(&rx->to_host_lock, flags);

		phy_dev = (struct phy_dev *)r->cb_data;
		udev = (struct lte_udev *)phy_dev->priv_dev;
		hci = (struct hci_packet *)r->buf;
		cmd_evt = gdm_dev16_to_cpu(&udev->gdm_ed, hci->cmd_evt);

		switch (cmd_evt) {
		case LTE_GET_INFORMATION_RESULT:
			if (set_mac_address(hci->data, r->cb_data) == 0) {
				ret = r->callback(r->cb_data,
						  r->buf,
						  r->urb->actual_length,
						  KERNEL_THREAD);
			}
			break;

		default:
			if (r->callback) {
				ret = r->callback(r->cb_data,
						  r->buf,
						  r->urb->actual_length,
						  KERNEL_THREAD);

				if (ret == -EAGAIN)
					pr_err("failed to send received data\n");
			}
			break;
		}

		put_rx_struct(rx, r);

		gdm_usb_recv(udev,
			     r->callback,
			     r->cb_data,
			     USB_COMPLETE);
	}
}

static void remove_rx_submit_list(struct usb_rx *r, struct rx_cxt *rx)
{
	unsigned long flags;
	struct usb_rx *r_remove, *r_remove_next;

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_for_each_entry_safe(r_remove, r_remove_next,
				 &rx->rx_submit_list, rx_submit_list) {
		if (r == r_remove) {
			list_del(&r->rx_submit_list);
			break;
		}
	}
	spin_unlock_irqrestore(&rx->submit_lock, flags);
}

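/*
 * Bulk-in completion handler: unlink the URB from the submit list and, on
 * success, queue the buffer on rx->to_host_list for do_rx(); on error the
 * descriptor goes straight back to the free pool.
 */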
static void gdm_usb_rcv_complete(struct urb *urb)
{
	struct usb_rx *r = urb->context;
	struct rx_cxt *rx = r->rx;
	unsigned long flags;
	struct lte_udev *udev = container_of(r->rx, struct lte_udev, rx);
	struct usb_device *usbdev = udev->usbdev;

	remove_rx_submit_list(r, rx);

	if (!urb->status && r->callback) {
		spin_lock_irqsave(&rx->to_host_lock, flags);
		list_add_tail(&r->to_host_list, &rx->to_host_list);
		queue_work(usb_rx_wq, &udev->work_rx.work);
		spin_unlock_irqrestore(&rx->to_host_lock, flags);
	} else {
		if (urb->status && udev->usb_state == PM_NORMAL)
			pr_err("%s: urb status error %d\n",
			       __func__, urb->status);

		put_rx_struct(rx, r);
	}

	usb_mark_last_busy(usbdev);
}

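/*
 * Grab a free RX descriptor and submit a bulk-in URB on endpoint 0x83.
 * The context argument only selects the allocation mode (GFP_KERNEL for
 * KERNEL_THREAD, GFP_ATOMIC otherwise).
 */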
static int gdm_usb_recv(void *priv_dev,
			int (*cb)(void *cb_data, void *data, int len, int context),
			void *cb_data,
			int context)
{
	struct lte_udev *udev = priv_dev;
	struct usb_device *usbdev = udev->usbdev;
	struct rx_cxt *rx = &udev->rx;
	struct usb_rx *r;
	int no_spc;
	int ret;
	unsigned long flags;

	if (!udev->usbdev) {
		pr_err("invalid device\n");
		return -ENODEV;
	}

	r = get_rx_struct(rx, &no_spc);
	if (!r) {
		pr_err("Out of Memory\n");
		return -ENOMEM;
	}

	udev->rx_cb = cb;
	r->callback = cb;
	r->cb_data = cb_data;
	r->index = (void *)udev;
	r->rx = rx;

	usb_fill_bulk_urb(r->urb,
			  usbdev,
			  usb_rcvbulkpipe(usbdev, 0x83),
			  r->buf,
			  RX_BUF_SIZE,
			  gdm_usb_rcv_complete,
			  r);

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_add_tail(&r->rx_submit_list, &rx->rx_submit_list);
	spin_unlock_irqrestore(&rx->submit_lock, flags);

	if (context == KERNEL_THREAD)
		ret = usb_submit_urb(r->urb, GFP_KERNEL);
	else
		ret = usb_submit_urb(r->urb, GFP_ATOMIC);

	if (ret) {
		spin_lock_irqsave(&rx->submit_lock, flags);
		list_del(&r->rx_submit_list);
		spin_unlock_irqrestore(&rx->submit_lock, flags);

		pr_err("usb_submit_urb failed (%p)\n", r);
		put_rx_struct(rx, r);
	}

	return ret;
}

static void gdm_usb_send_complete(struct urb *urb)
{
	struct usb_tx *t = urb->context;
	struct tx_cxt *tx = t->tx;
	struct lte_udev *udev = container_of(tx, struct lte_udev, tx);
	unsigned long flags;

	if (urb->status == -ECONNRESET) {
		pr_info("CONNRESET\n");
		return;
	}

	if (t->callback)
		t->callback(t->cb_data);

	free_tx_struct(t);

	spin_lock_irqsave(&tx->lock, flags);
	udev->send_complete = 1;
	queue_work(usb_tx_wq, &udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);
}

static int send_tx_packet(struct usb_device *usbdev, struct usb_tx *t, u32 len)
{
	int ret = 0;

	if (!(len % 512))
		len++;

	usb_fill_bulk_urb(t->urb,
			  usbdev,
			  usb_sndbulkpipe(usbdev, 2),
			  t->buf,
			  len,
			  gdm_usb_send_complete,
			  t);

	ret = usb_submit_urb(t->urb, GFP_ATOMIC);

	if (ret)
		pr_err("usb_submit_urb failed: %d\n", ret);

	usb_mark_last_busy(usbdev);

	return ret;
}

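/*
 * Pack as many queued SDUs as fit (up to MAX_PACKET_IN_MULTI_SDU and
 * MAX_SDU_SIZE bytes) into one LTE_TX_MULTI_SDU frame in send_buf, each
 * SDU padded to a 4-byte boundary.  Returns the total frame length.
 */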
static u32 packet_aggregation(struct lte_udev *udev, u8 *send_buf)
{
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx_sdu *t_sdu = NULL;
	struct multi_sdu *multi_sdu = (struct multi_sdu *)send_buf;
	u16 send_len = 0;
	u16 num_packet = 0;
	unsigned long flags;

	multi_sdu->cmd_evt = gdm_cpu_to_dev16(&udev->gdm_ed, LTE_TX_MULTI_SDU);

	while (num_packet < MAX_PACKET_IN_MULTI_SDU) {
		spin_lock_irqsave(&tx->lock, flags);
		if (list_empty(&tx->sdu_list)) {
			spin_unlock_irqrestore(&tx->lock, flags);
			break;
		}

		t_sdu = list_entry(tx->sdu_list.next, struct usb_tx_sdu, list);
		if (send_len + t_sdu->len > MAX_SDU_SIZE) {
			spin_unlock_irqrestore(&tx->lock, flags);
			break;
		}

		list_del(&t_sdu->list);
		spin_unlock_irqrestore(&tx->lock, flags);

		memcpy(multi_sdu->data + send_len, t_sdu->buf, t_sdu->len);

		send_len += (t_sdu->len + 3) & 0xfffc;
		num_packet++;

		if (tx->avail_count > 10)
			t_sdu->callback(t_sdu->cb_data);

		spin_lock_irqsave(&tx->lock, flags);
		put_tx_struct(tx, t_sdu);
		spin_unlock_irqrestore(&tx->lock, flags);
	}

	multi_sdu->len = gdm_cpu_to_dev16(&udev->gdm_ed, send_len);
	multi_sdu->num_packet = gdm_cpu_to_dev16(&udev->gdm_ed, num_packet);

	return send_len + offsetof(struct multi_sdu, data);
}

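/*
 * TX work item: send one pending HCI packet if there is one, otherwise
 * aggregate queued SDUs into a multi-SDU frame and send that.  Only one
 * transfer is in flight at a time, gated by udev->send_complete.
 */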
static void do_tx(struct work_struct *work)
{
	struct lte_udev *udev =
		container_of(work, struct lte_udev, work_tx.work);
	struct usb_device *usbdev = udev->usbdev;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx *t = NULL;
	int is_send = 0;
	u32 len = 0;
	unsigned long flags;

	if (!usb_autopm_get_interface(udev->intf))
		usb_autopm_put_interface(udev->intf);

	if (udev->usb_state == PM_SUSPEND)
		return;

	spin_lock_irqsave(&tx->lock, flags);
	if (!udev->send_complete) {
		spin_unlock_irqrestore(&tx->lock, flags);
		return;
	}
	udev->send_complete = 0;

	if (!list_empty(&tx->hci_list)) {
		t = list_entry(tx->hci_list.next, struct usb_tx, list);
		list_del(&t->list);
		len = t->len;
		t->is_sdu = 0;
		is_send = 1;
	} else if (!list_empty(&tx->sdu_list)) {
		if (udev->tx_stop) {
			udev->send_complete = 1;
			spin_unlock_irqrestore(&tx->lock, flags);
			return;
		}

		t = alloc_tx_struct(TX_BUF_SIZE);
		if (t == NULL) {
			udev->send_complete = 1;
			spin_unlock_irqrestore(&tx->lock, flags);
			return;
		}
		t->callback = NULL;
		t->tx = tx;
		t->is_sdu = 1;
		is_send = 1;
	}

	if (!is_send) {
		udev->send_complete = 1;
		spin_unlock_irqrestore(&tx->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&tx->lock, flags);

	if (t->is_sdu)
		len = packet_aggregation(udev, t->buf);

	if (send_tx_packet(usbdev, t, len)) {
		pr_err("send_tx_packet failed\n");
		t->callback = NULL;
		gdm_usb_send_complete(t->urb);
	}
}

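/*
 * Queue one SDU for transmission: copy the payload (minus the Ethernet
 * header for non-ARP traffic) into a pooled descriptor, fill in the HCI
 * SDU header and kick the TX work.  Returns TX_NO_BUFFER when this used
 * up the last free descriptor, so the caller can throttle.
 */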
#define SDU_PARAM_LEN 12
static int gdm_usb_sdu_send(void *priv_dev, void *data, int len,
			    unsigned int dftEpsId, unsigned int epsId,
			    void (*cb)(void *data), void *cb_data,
			    int dev_idx, int nic_type)
{
	struct lte_udev *udev = priv_dev;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx_sdu *t_sdu;
	struct sdu *sdu = NULL;
	unsigned long flags;
	int no_spc = 0;
	u16 send_len;

	if (!udev->usbdev) {
		pr_err("sdu send - invalid device\n");
		return TX_NO_DEV;
	}

	spin_lock_irqsave(&tx->lock, flags);
	t_sdu = get_tx_sdu_struct(tx, &no_spc);
	spin_unlock_irqrestore(&tx->lock, flags);

	if (t_sdu == NULL) {
		pr_err("sdu send - free list empty\n");
		return TX_NO_SPC;
	}

	sdu = (struct sdu *)t_sdu->buf;
	sdu->cmd_evt = gdm_cpu_to_dev16(&udev->gdm_ed, LTE_TX_SDU);
	if (nic_type == NIC_TYPE_ARP) {
		send_len = len + SDU_PARAM_LEN;
		memcpy(sdu->data, data, len);
	} else {
		send_len = len - ETH_HLEN;
		send_len += SDU_PARAM_LEN;
		memcpy(sdu->data, data + ETH_HLEN, len - ETH_HLEN);
	}

	sdu->len = gdm_cpu_to_dev16(&udev->gdm_ed, send_len);
	sdu->dftEpsId = gdm_cpu_to_dev32(&udev->gdm_ed, dftEpsId);
	sdu->bearer_ID = gdm_cpu_to_dev32(&udev->gdm_ed, epsId);
	sdu->nic_type = gdm_cpu_to_dev32(&udev->gdm_ed, nic_type);

	t_sdu->len = send_len + HCI_HEADER_SIZE;
	t_sdu->callback = cb;
	t_sdu->cb_data = cb_data;

	spin_lock_irqsave(&tx->lock, flags);
	list_add_tail(&t_sdu->list, &tx->sdu_list);
	queue_work(usb_tx_wq, &udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);

	if (no_spc)
		return TX_NO_BUFFER;

	return 0;
}

static int gdm_usb_hci_send(void *priv_dev, void *data, int len,
			    void (*cb)(void *data), void *cb_data)
{
	struct lte_udev *udev = priv_dev;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx *t;
	unsigned long flags;

	if (!udev->usbdev) {
		pr_err("hci send - invalid device\n");
		return -ENODEV;
	}

	t = alloc_tx_struct(len);
	if (t == NULL) {
		pr_err("hci_send - out of memory\n");
		return -ENOMEM;
	}

	memcpy(t->buf, data, len);
	t->callback = cb;
	t->cb_data = cb_data;
	t->len = len;
	t->tx = tx;
	t->is_sdu = 0;

	spin_lock_irqsave(&tx->lock, flags);
	list_add_tail(&t->list, &tx->hci_list);
	queue_work(usb_tx_wq, &udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);

	return 0;
}

static struct gdm_endian *gdm_usb_get_endian(void *priv_dev)
{
	struct lte_udev *udev = priv_dev;

	return &udev->gdm_ed;
}

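/*
 * Probe: allocate the phy_dev/lte_udev pair, initialise the USB contexts,
 * enable autosuspend, pick the device endianness by product ID and request
 * the MAC address.  RX processing is started before the probe returns.
 */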
static int gdm_usb_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	int ret = 0;
	struct phy_dev *phy_dev = NULL;
	struct lte_udev *udev = NULL;
	u16 idVendor, idProduct;
	int bInterfaceNumber;
	struct usb_device *usbdev = interface_to_usbdev(intf);

	bInterfaceNumber = intf->cur_altsetting->desc.bInterfaceNumber;
	idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
	idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);

	pr_info("net vid = 0x%04x pid = 0x%04x\n", idVendor, idProduct);

	if (bInterfaceNumber > NETWORK_INTERFACE) {
		pr_info("not a network device\n");
		return -1;
	}

	phy_dev = kmalloc(sizeof(struct phy_dev), GFP_ATOMIC);
	if (!phy_dev) {
		ret = -ENOMEM;
		goto out;
	}

	udev = kmalloc(sizeof(struct lte_udev), GFP_ATOMIC);
	if (!udev) {
		ret = -ENOMEM;
		goto out;
	}

	memset(phy_dev, 0, sizeof(struct phy_dev));
	memset(udev, 0, sizeof(struct lte_udev));

	phy_dev->priv_dev = (void *)udev;
	phy_dev->send_hci_func = gdm_usb_hci_send;
	phy_dev->send_sdu_func = gdm_usb_sdu_send;
	phy_dev->rcv_func = gdm_usb_recv;
	phy_dev->get_endian = gdm_usb_get_endian;

	udev->usbdev = usbdev;
	ret = init_usb(udev);
	if (ret < 0) {
		pr_err("init_usb func failed\n");
		goto out;
	}
	udev->intf = intf;

	intf->needs_remote_wakeup = 1;
	usb_enable_autosuspend(usbdev);
	pm_runtime_set_autosuspend_delay(&usbdev->dev, AUTO_SUSPEND_TIMER);

	/* Products that use big-endian HCI; everything else defaults to little endian */
	if (idProduct == PID_GDM7243)
		gdm_set_endian(&udev->gdm_ed, ENDIANNESS_BIG);
	else
		gdm_set_endian(&udev->gdm_ed, ENDIANNESS_LITTLE);

	ret = request_mac_address(udev);
	if (ret < 0) {
		pr_err("request MAC address failed\n");
		goto out;
	}

	start_rx_proc(phy_dev);
out:
	if (ret < 0) {
		kfree(phy_dev);
		if (udev) {
			release_usb(udev);
			kfree(udev);
		}
		/* do not take a device reference or publish freed pointers */
		return ret;
	}

	usb_get_dev(usbdev);
	usb_set_intfdata(intf, phy_dev);

	return ret;
}

static void gdm_usb_disconnect(struct usb_interface *intf)
{
	struct phy_dev *phy_dev;
	struct lte_udev *udev;
	u16 idVendor, idProduct;
	struct usb_device *usbdev;

	usbdev = interface_to_usbdev(intf);

	idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
	idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);

	phy_dev = usb_get_intfdata(intf);

	udev = phy_dev->priv_dev;
	unregister_lte_device(phy_dev);

	release_usb(udev);

	kfree(udev);
	udev = NULL;

	kfree(phy_dev);
	phy_dev = NULL;

	usb_put_dev(usbdev);
}

static int gdm_usb_suspend(struct usb_interface *intf, pm_message_t pm_msg)
{
	struct phy_dev *phy_dev;
	struct lte_udev *udev;
	struct rx_cxt *rx;
	struct usb_rx *r;
	struct usb_rx *r_next;
	unsigned long flags;

	phy_dev = usb_get_intfdata(intf);
	udev = phy_dev->priv_dev;
	rx = &udev->rx;
	if (udev->usb_state != PM_NORMAL) {
		pr_err("usb suspend - invalid state\n");
		return -1;
	}

	udev->usb_state = PM_SUSPEND;

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
				 rx_submit_list) {
		spin_unlock_irqrestore(&rx->submit_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_lock, flags);
	}
	spin_unlock_irqrestore(&rx->submit_lock, flags);

	return 0;
}

static int gdm_usb_resume(struct usb_interface *intf)
{
	struct phy_dev *phy_dev;
	struct lte_udev *udev;
	struct tx_cxt *tx;
	struct rx_cxt *rx;
	unsigned long flags;
	int issue_count;
	int i;

	phy_dev = usb_get_intfdata(intf);
	udev = phy_dev->priv_dev;
	rx = &udev->rx;

	if (udev->usb_state != PM_SUSPEND) {
		pr_err("usb resume - invalid state\n");
		return -1;
	}
	udev->usb_state = PM_NORMAL;

	spin_lock_irqsave(&rx->rx_lock, flags);
	issue_count = rx->avail_count - MAX_RX_SUBMIT_COUNT;
	spin_unlock_irqrestore(&rx->rx_lock, flags);

	if (issue_count >= 0) {
		for (i = 0; i < issue_count; i++)
			gdm_usb_recv(phy_dev->priv_dev,
				     udev->rx_cb,
				     phy_dev,
				     USB_COMPLETE);
	}

	tx = &udev->tx;
	spin_lock_irqsave(&tx->lock, flags);
	queue_work(usb_tx_wq, &udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);

	return 0;
}

static struct usb_driver gdm_usb_lte_driver = {
	.name = "gdm_lte",
	.probe = gdm_usb_probe,
	.disconnect = gdm_usb_disconnect,
	.id_table = id_table,
	.supports_autosuspend = 1,
	.suspend = gdm_usb_suspend,
	.resume = gdm_usb_resume,
	.reset_resume = gdm_usb_resume,
};

static int __init gdm_usb_lte_init(void)
{
	if (gdm_lte_event_init() < 0) {
		pr_err("error creating event\n");
		return -1;
	}

	usb_tx_wq = create_workqueue("usb_tx_wq");
	if (usb_tx_wq == NULL)
		return -1;

	usb_rx_wq = create_workqueue("usb_rx_wq");
	if (usb_rx_wq == NULL)
		return -1;

	return usb_register(&gdm_usb_lte_driver);
}

static void __exit gdm_usb_lte_exit(void)
{
	gdm_lte_event_exit();

	usb_deregister(&gdm_usb_lte_driver);

	if (usb_tx_wq) {
		flush_workqueue(usb_tx_wq);
		destroy_workqueue(usb_tx_wq);
	}

	if (usb_rx_wq) {
		flush_workqueue(usb_rx_wq);
		destroy_workqueue(usb_rx_wq);
	}
}

module_init(gdm_usb_lte_init);
module_exit(gdm_usb_lte_exit);

MODULE_VERSION(DRIVER_VERSION);
MODULE_DESCRIPTION("GCT LTE USB Device Driver");
MODULE_LICENSE("GPL");