/*
 * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/usb.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/usb/cdc.h>
#include <linux/wait.h>
#include <linux/if_ether.h>
#include <linux/pm_runtime.h>

#include "gdm_usb.h"
#include "gdm_lte.h"
#include "hci.h"
#include "hci_packet.h"
#include "gdm_endian.h"

#define USB_DEVICE_CDC_DATA(vid, pid) \
	.match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_CLASS | USB_DEVICE_ID_MATCH_INT_SUBCLASS,\
	.idVendor = vid,\
	.idProduct = pid,\
	.bInterfaceClass = USB_CLASS_COMM,\
	.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET

#define USB_DEVICE_MASS_DATA(vid, pid) \
	.match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,\
	.idVendor = vid,\
	.idProduct = pid,\
	.bInterfaceSubClass = USB_SC_SCSI, \
	.bInterfaceClass = USB_CLASS_MASS_STORAGE,\
	.bInterfaceProtocol = USB_PR_BULK

static const struct usb_device_id id_table[] = {
	{ USB_DEVICE_CDC_DATA(VID_GCT, PID_GDM7240) }, /* GCT GDM7240 */
	{ USB_DEVICE_CDC_DATA(VID_GCT, PID_GDM7243) }, /* GCT GDM7243 */
	{ }
};

MODULE_DEVICE_TABLE(usb, id_table);

static struct workqueue_struct *usb_tx_wq;
static struct workqueue_struct *usb_rx_wq;

static void do_tx(struct work_struct *work);
static void do_rx(struct work_struct *work);

static int gdm_usb_recv(void *priv_dev,
			int (*cb)(void *cb_data, void *data, int len, int context),
			void *cb_data,
			int context);

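/*
 * Queue an HCI LTE_GET_INFORMATION(MAC_ADDRESS) request on the bulk-out
 * endpoint.  The reply arrives later through the RX path and is handled
 * by set_mac_address().
 */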
static int request_mac_address(struct lte_udev *udev)
{
	u8 buf[16] = {0,};
	struct hci_packet *hci = (struct hci_packet *)buf;
	struct usb_device *usbdev = udev->usbdev;
	int actual;
	int ret = -1;

	hci->cmd_evt = gdm_cpu_to_dev16(&udev->gdm_ed, LTE_GET_INFORMATION);
	hci->len = gdm_cpu_to_dev16(&udev->gdm_ed, 1);
	hci->data[0] = MAC_ADDRESS;

	ret = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 2), buf, 5,
			   &actual, 1000);

	udev->request_mac_addr = 1;

	return ret;
}

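/*
 * Allocate a TX descriptor together with its URB and data buffer.  A buffer
 * length that is an exact multiple of 512 is bumped by one byte, presumably
 * so the bulk-out transfer does not end exactly on a max-packet boundary.
 */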
static struct usb_tx *alloc_tx_struct(int len)
{
	struct usb_tx *t = NULL;
	int ret = 0;

	t = kzalloc(sizeof(struct usb_tx), GFP_ATOMIC);
	if (!t) {
		ret = -ENOMEM;
		goto out;
	}

	t->urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!(len % 512))
		len++;

	t->buf = kmalloc(len, GFP_ATOMIC);
	if (!t->urb || !t->buf) {
		ret = -ENOMEM;
		goto out;
	}

out:
	if (ret < 0) {
		if (t) {
			usb_free_urb(t->urb);
			kfree(t->buf);
			kfree(t);
		}
		return NULL;
	}

	return t;
}

static struct usb_tx_sdu *alloc_tx_sdu_struct(void)
{
	struct usb_tx_sdu *t_sdu;

	t_sdu = kzalloc(sizeof(struct usb_tx_sdu), GFP_ATOMIC);
	if (!t_sdu)
		return NULL;

	t_sdu->buf = kmalloc(SDU_BUF_SIZE, GFP_ATOMIC);
	if (!t_sdu->buf) {
		kfree(t_sdu);
		return NULL;
	}

	return t_sdu;
}

static void free_tx_struct(struct usb_tx *t)
{
	if (t) {
		usb_free_urb(t->urb);
		kfree(t->buf);
		kfree(t);
	}
}

static void free_tx_sdu_struct(struct usb_tx_sdu *t_sdu)
{
	if (t_sdu) {
		kfree(t_sdu->buf);
		kfree(t_sdu);
	}
}

static struct usb_tx_sdu *get_tx_sdu_struct(struct tx_cxt *tx, int *no_spc)
{
	struct usb_tx_sdu *t_sdu;

	if (list_empty(&tx->free_list))
		return NULL;

	t_sdu = list_entry(tx->free_list.next, struct usb_tx_sdu, list);
	list_del(&t_sdu->list);

	tx->avail_count--;

	*no_spc = list_empty(&tx->free_list) ? 1 : 0;

	return t_sdu;
}

static void put_tx_struct(struct tx_cxt *tx, struct usb_tx_sdu *t_sdu)
{
	list_add_tail(&t_sdu->list, &tx->free_list);
	tx->avail_count++;
}

static struct usb_rx *alloc_rx_struct(void)
{
	struct usb_rx *r = NULL;
	int ret = 0;

	r = kmalloc(sizeof(struct usb_rx), GFP_ATOMIC);
	if (!r) {
		ret = -ENOMEM;
		goto out;
	}

	r->urb = usb_alloc_urb(0, GFP_ATOMIC);
	r->buf = kmalloc(RX_BUF_SIZE, GFP_ATOMIC);
	if (!r->urb || !r->buf) {
		ret = -ENOMEM;
		goto out;
	}
out:

	if (ret < 0) {
		if (r) {
			usb_free_urb(r->urb);
			kfree(r->buf);
			kfree(r);
		}
		return NULL;
	}

	return r;
}

static void free_rx_struct(struct usb_rx *r)
{
	if (r) {
		usb_free_urb(r->urb);
		kfree(r->buf);
		kfree(r);
	}
}

static struct usb_rx *get_rx_struct(struct rx_cxt *rx, int *no_spc)
{
	struct usb_rx *r;
	unsigned long flags;

	spin_lock_irqsave(&rx->rx_lock, flags);

	if (list_empty(&rx->free_list)) {
		spin_unlock_irqrestore(&rx->rx_lock, flags);
		return NULL;
	}

	r = list_entry(rx->free_list.next, struct usb_rx, free_list);
	list_del(&r->free_list);

	rx->avail_count--;

	*no_spc = list_empty(&rx->free_list) ? 1 : 0;

	spin_unlock_irqrestore(&rx->rx_lock, flags);

	return r;
}

static void put_rx_struct(struct rx_cxt *rx, struct usb_rx *r)
{
	unsigned long flags;

	spin_lock_irqsave(&rx->rx_lock, flags);

	list_add_tail(&r->free_list, &rx->free_list);
	rx->avail_count++;

	spin_unlock_irqrestore(&rx->rx_lock, flags);
}

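/*
 * Tear down all TX/RX state: free queued and pooled TX descriptors, kill
 * any RX URBs still in flight, and free the RX free/to-host lists.
 */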
static void release_usb(struct lte_udev *udev)
{
	struct rx_cxt *rx = &udev->rx;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx *t, *t_next;
	struct usb_rx *r, *r_next;
	struct usb_tx_sdu *t_sdu, *t_sdu_next;
	unsigned long flags;

	spin_lock_irqsave(&tx->lock, flags);
	list_for_each_entry_safe(t_sdu, t_sdu_next, &tx->sdu_list, list) {
		list_del(&t_sdu->list);
		free_tx_sdu_struct(t_sdu);
	}

	list_for_each_entry_safe(t, t_next, &tx->hci_list, list) {
		list_del(&t->list);
		free_tx_struct(t);
	}

	list_for_each_entry_safe(t_sdu, t_sdu_next, &tx->free_list, list) {
		list_del(&t_sdu->list);
		free_tx_sdu_struct(t_sdu);
	}
	spin_unlock_irqrestore(&tx->lock, flags);

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list, rx_submit_list) {
		spin_unlock_irqrestore(&rx->submit_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_lock, flags);
	}
	spin_unlock_irqrestore(&rx->submit_lock, flags);

	spin_lock_irqsave(&rx->rx_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->free_list, free_list) {
		list_del(&r->free_list);
		free_rx_struct(r);
	}
	spin_unlock_irqrestore(&rx->rx_lock, flags);

	spin_lock_irqsave(&rx->to_host_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->to_host_list, to_host_list) {
		if (r->index == (void *)udev) {
			list_del(&r->to_host_list);
			free_rx_struct(r);
		}
	}
	spin_unlock_irqrestore(&rx->to_host_lock, flags);
}

static int init_usb(struct lte_udev *udev)
{
	int ret = 0;
	int i;
	struct tx_cxt *tx = &udev->tx;
	struct rx_cxt *rx = &udev->rx;
	struct usb_tx_sdu *t_sdu = NULL;
	struct usb_rx *r = NULL;

	udev->send_complete = 1;
	udev->tx_stop = 0;
	udev->request_mac_addr = 0;
	udev->usb_state = PM_NORMAL;

	INIT_LIST_HEAD(&tx->sdu_list);
	INIT_LIST_HEAD(&tx->hci_list);
	INIT_LIST_HEAD(&tx->free_list);
	INIT_LIST_HEAD(&rx->rx_submit_list);
	INIT_LIST_HEAD(&rx->free_list);
	INIT_LIST_HEAD(&rx->to_host_list);
	spin_lock_init(&tx->lock);
	spin_lock_init(&rx->rx_lock);
	spin_lock_init(&rx->submit_lock);
	spin_lock_init(&rx->to_host_lock);

	tx->avail_count = 0;
	rx->avail_count = 0;

	udev->rx_cb = NULL;

	for (i = 0; i < MAX_NUM_SDU_BUF; i++) {
		t_sdu = alloc_tx_sdu_struct();
		if (t_sdu == NULL) {
			ret = -ENOMEM;
			goto fail;
		}

		list_add(&t_sdu->list, &tx->free_list);
		tx->avail_count++;
	}

	for (i = 0; i < MAX_RX_SUBMIT_COUNT * 2; i++) {
		r = alloc_rx_struct();
		if (r == NULL) {
			ret = -ENOMEM;
			goto fail;
		}

		list_add(&r->free_list, &rx->free_list);
		rx->avail_count++;
	}
	INIT_DELAYED_WORK(&udev->work_tx, do_tx);
	INIT_DELAYED_WORK(&udev->work_rx, do_rx);
	return 0;
fail:
	return ret;
}

static int set_mac_address(u8 *data, void *arg)
{
	struct phy_dev *phy_dev = (struct phy_dev *)arg;
	struct lte_udev *udev = phy_dev->priv_dev;
	struct tlv *tlv = (struct tlv *)data;
	u8 mac_address[ETH_ALEN] = {0, };

	if (tlv->type == MAC_ADDRESS && udev->request_mac_addr) {
		memcpy(mac_address, tlv->data, tlv->len);

		if (register_lte_device(phy_dev, &udev->intf->dev, mac_address) < 0)
			pr_err("register lte device failed\n");

		udev->request_mac_addr = 0;

		return 1;
	}

	return 0;
}

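/*
 * RX work handler: drain the to-host list, hand each completed buffer to
 * the registered callback (handling the MAC-address reply specially),
 * return the descriptor to the pool and resubmit a receive URB.
 */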
static void do_rx(struct work_struct *work)
{
	struct lte_udev *udev = container_of(work, struct lte_udev, work_rx.work);
	struct rx_cxt *rx = &udev->rx;
	struct usb_rx *r;
	struct hci_packet *hci;
	struct phy_dev *phy_dev;
	u16 cmd_evt;
	int ret;
	unsigned long flags;

	while (1) {
		spin_lock_irqsave(&rx->to_host_lock, flags);
		if (list_empty(&rx->to_host_list)) {
			spin_unlock_irqrestore(&rx->to_host_lock, flags);
			break;
		}
		r = list_entry(rx->to_host_list.next, struct usb_rx, to_host_list);
		list_del(&r->to_host_list);
		spin_unlock_irqrestore(&rx->to_host_lock, flags);

		phy_dev = (struct phy_dev *)r->cb_data;
		udev = (struct lte_udev *)phy_dev->priv_dev;
		hci = (struct hci_packet *)r->buf;
		cmd_evt = gdm_dev16_to_cpu(&udev->gdm_ed, hci->cmd_evt);

		switch (cmd_evt) {
		case LTE_GET_INFORMATION_RESULT:
			if (set_mac_address(hci->data, r->cb_data) == 0) {
				ret = r->callback(r->cb_data,
						  r->buf,
						  r->urb->actual_length,
						  KERNEL_THREAD);
			}
			break;

		default:
			if (r->callback) {
				ret = r->callback(r->cb_data,
						  r->buf,
						  r->urb->actual_length,
						  KERNEL_THREAD);

				if (ret == -EAGAIN)
					pr_err("failed to send received data\n");
			}
			break;
		}

		put_rx_struct(rx, r);

		gdm_usb_recv(udev,
			     r->callback,
			     r->cb_data,
			     USB_COMPLETE);
	}
}

static void remove_rx_submit_list(struct usb_rx *r, struct rx_cxt *rx)
{
	unsigned long flags;
	struct usb_rx *r_remove, *r_remove_next;

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_for_each_entry_safe(r_remove, r_remove_next, &rx->rx_submit_list, rx_submit_list) {
		if (r == r_remove) {
			list_del(&r->rx_submit_list);
			break;
		}
	}
	spin_unlock_irqrestore(&rx->submit_lock, flags);
}

static void gdm_usb_rcv_complete(struct urb *urb)
{
	struct usb_rx *r = urb->context;
	struct rx_cxt *rx = r->rx;
	unsigned long flags;
	struct lte_udev *udev = container_of(r->rx, struct lte_udev, rx);
	struct usb_device *usbdev = udev->usbdev;

	remove_rx_submit_list(r, rx);

	if (!urb->status && r->callback) {
		spin_lock_irqsave(&rx->to_host_lock, flags);
		list_add_tail(&r->to_host_list, &rx->to_host_list);
		queue_work(usb_rx_wq, &udev->work_rx.work);
		spin_unlock_irqrestore(&rx->to_host_lock, flags);
	} else {
		if (urb->status && udev->usb_state == PM_NORMAL)
			pr_err("%s: urb status error %d\n",
			       __func__, urb->status);

		put_rx_struct(rx, r);
	}

	usb_mark_last_busy(usbdev);
}

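/*
 * Take a descriptor from the RX pool and submit a bulk-in URB on endpoint
 * 0x83.  Completed buffers are queued for do_rx() by gdm_usb_rcv_complete().
 */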
static int gdm_usb_recv(void *priv_dev,
			int (*cb)(void *cb_data, void *data, int len, int context),
			void *cb_data,
			int context)
{
	struct lte_udev *udev = priv_dev;
	struct usb_device *usbdev = udev->usbdev;
	struct rx_cxt *rx = &udev->rx;
	struct usb_rx *r;
	int no_spc;
	int ret;
	unsigned long flags;

	if (!udev->usbdev) {
		pr_err("invalid device\n");
		return -ENODEV;
	}

	r = get_rx_struct(rx, &no_spc);
	if (!r) {
		pr_err("Out of Memory\n");
		return -ENOMEM;
	}

	udev->rx_cb = cb;
	r->callback = cb;
	r->cb_data = cb_data;
	r->index = (void *)udev;
	r->rx = rx;

	usb_fill_bulk_urb(r->urb,
			  usbdev,
			  usb_rcvbulkpipe(usbdev, 0x83),
			  r->buf,
			  RX_BUF_SIZE,
			  gdm_usb_rcv_complete,
			  r);

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_add_tail(&r->rx_submit_list, &rx->rx_submit_list);
	spin_unlock_irqrestore(&rx->submit_lock, flags);

	if (context == KERNEL_THREAD)
		ret = usb_submit_urb(r->urb, GFP_KERNEL);
	else
		ret = usb_submit_urb(r->urb, GFP_ATOMIC);

	if (ret) {
		spin_lock_irqsave(&rx->submit_lock, flags);
		list_del(&r->rx_submit_list);
		spin_unlock_irqrestore(&rx->submit_lock, flags);

		pr_err("usb_submit_urb failed (%p)\n", r);
		put_rx_struct(rx, r);
	}

	return ret;
}

static void gdm_usb_send_complete(struct urb *urb)
{
	struct usb_tx *t = urb->context;
	struct tx_cxt *tx = t->tx;
	struct lte_udev *udev = container_of(tx, struct lte_udev, tx);
	unsigned long flags;

	if (urb->status == -ECONNRESET) {
		pr_info("CONNRESET\n");
		return;
	}

	if (t->callback)
		t->callback(t->cb_data);

	free_tx_struct(t);

	spin_lock_irqsave(&tx->lock, flags);
	udev->send_complete = 1;
	queue_work(usb_tx_wq, &udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);
}

static int send_tx_packet(struct usb_device *usbdev, struct usb_tx *t, u32 len)
{
	int ret = 0;

	if (!(len % 512))
		len++;

	usb_fill_bulk_urb(t->urb,
			  usbdev,
			  usb_sndbulkpipe(usbdev, 2),
			  t->buf,
			  len,
			  gdm_usb_send_complete,
			  t);

	ret = usb_submit_urb(t->urb, GFP_ATOMIC);

	if (ret)
		pr_err("usb_submit_urb failed: %d\n", ret);

	usb_mark_last_busy(usbdev);

	return ret;
}

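/*
 * Pack as many queued SDUs as will fit into a single LTE_TX_MULTI_SDU
 * frame.  Each SDU is padded to a 4-byte boundary; the returned value is
 * the total frame length handed to send_tx_packet().
 */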
static u32 packet_aggregation(struct lte_udev *udev, u8 *send_buf)
{
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx_sdu *t_sdu = NULL;
	struct multi_sdu *multi_sdu = (struct multi_sdu *)send_buf;
	u16 send_len = 0;
	u16 num_packet = 0;
	unsigned long flags;

	multi_sdu->cmd_evt = gdm_cpu_to_dev16(&udev->gdm_ed, LTE_TX_MULTI_SDU);

	while (num_packet < MAX_PACKET_IN_MULTI_SDU) {
		spin_lock_irqsave(&tx->lock, flags);
		if (list_empty(&tx->sdu_list)) {
			spin_unlock_irqrestore(&tx->lock, flags);
			break;
		}

		t_sdu = list_entry(tx->sdu_list.next, struct usb_tx_sdu, list);
		if (send_len + t_sdu->len > MAX_SDU_SIZE) {
			spin_unlock_irqrestore(&tx->lock, flags);
			break;
		}

		list_del(&t_sdu->list);
		spin_unlock_irqrestore(&tx->lock, flags);

		memcpy(multi_sdu->data + send_len, t_sdu->buf, t_sdu->len);

		send_len += (t_sdu->len + 3) & 0xfffc;
		num_packet++;

		if (tx->avail_count > 10)
			t_sdu->callback(t_sdu->cb_data);

		spin_lock_irqsave(&tx->lock, flags);
		put_tx_struct(tx, t_sdu);
		spin_unlock_irqrestore(&tx->lock, flags);
	}

	multi_sdu->len = gdm_cpu_to_dev16(&udev->gdm_ed, send_len);
	multi_sdu->num_packet = gdm_cpu_to_dev16(&udev->gdm_ed, num_packet);

	return send_len + offsetof(struct multi_sdu, data);
}

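/*
 * TX work handler: HCI packets queued on hci_list are sent as-is; otherwise
 * pending SDUs are aggregated into one multi-SDU buffer.  Only one transfer
 * is kept in flight at a time, gated by udev->send_complete.
 */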
static void do_tx(struct work_struct *work)
{
	struct lte_udev *udev = container_of(work, struct lte_udev, work_tx.work);
	struct usb_device *usbdev = udev->usbdev;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx *t = NULL;
	int is_send = 0;
	u32 len = 0;
	unsigned long flags;

	if (!usb_autopm_get_interface(udev->intf))
		usb_autopm_put_interface(udev->intf);

	if (udev->usb_state == PM_SUSPEND)
		return;

	spin_lock_irqsave(&tx->lock, flags);
	if (!udev->send_complete) {
		spin_unlock_irqrestore(&tx->lock, flags);
		return;
	}
	udev->send_complete = 0;

	if (!list_empty(&tx->hci_list)) {
		t = list_entry(tx->hci_list.next, struct usb_tx, list);
		list_del(&t->list);
		len = t->len;
		t->is_sdu = 0;
		is_send = 1;
	} else if (!list_empty(&tx->sdu_list)) {
		if (udev->tx_stop) {
			udev->send_complete = 1;
			spin_unlock_irqrestore(&tx->lock, flags);
			return;
		}

		t = alloc_tx_struct(TX_BUF_SIZE);
		if (!t) {
			/* Allocation failed; give up this round and retry later. */
			udev->send_complete = 1;
			spin_unlock_irqrestore(&tx->lock, flags);
			return;
		}
		t->callback = NULL;
		t->tx = tx;
		t->is_sdu = 1;
		is_send = 1;
	}

	if (!is_send) {
		udev->send_complete = 1;
		spin_unlock_irqrestore(&tx->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&tx->lock, flags);

	if (t->is_sdu)
		len = packet_aggregation(udev, t->buf);

	if (send_tx_packet(usbdev, t, len)) {
		pr_err("send_tx_packet failed\n");
		t->callback = NULL;
		gdm_usb_send_complete(t->urb);
	}
}

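/*
 * Queue one SDU for transmission.  The Ethernet header is stripped for
 * regular traffic (ARP frames are copied whole), an SDU header is filled
 * in, and the TX work is kicked to aggregate and send it.
 */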
#define SDU_PARAM_LEN 12
static int gdm_usb_sdu_send(void *priv_dev, void *data, int len,
			    unsigned int dftEpsId, unsigned int epsId,
			    void (*cb)(void *data), void *cb_data,
			    int dev_idx, int nic_type)
{
	struct lte_udev *udev = priv_dev;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx_sdu *t_sdu;
	struct sdu *sdu = NULL;
	unsigned long flags;
	int no_spc = 0;
	u16 send_len;

	if (!udev->usbdev) {
		pr_err("sdu send - invalid device\n");
		return TX_NO_DEV;
	}

	spin_lock_irqsave(&tx->lock, flags);
	t_sdu = get_tx_sdu_struct(tx, &no_spc);
	spin_unlock_irqrestore(&tx->lock, flags);

	if (t_sdu == NULL) {
		pr_err("sdu send - free list empty\n");
		return TX_NO_SPC;
	}

	sdu = (struct sdu *)t_sdu->buf;
	sdu->cmd_evt = gdm_cpu_to_dev16(&udev->gdm_ed, LTE_TX_SDU);
	if (nic_type == NIC_TYPE_ARP) {
		send_len = len + SDU_PARAM_LEN;
		memcpy(sdu->data, data, len);
	} else {
		send_len = len - ETH_HLEN;
		send_len += SDU_PARAM_LEN;
		memcpy(sdu->data, data + ETH_HLEN, len - ETH_HLEN);
	}

	sdu->len = gdm_cpu_to_dev16(&udev->gdm_ed, send_len);
	sdu->dftEpsId = gdm_cpu_to_dev32(&udev->gdm_ed, dftEpsId);
	sdu->bearer_ID = gdm_cpu_to_dev32(&udev->gdm_ed, epsId);
	sdu->nic_type = gdm_cpu_to_dev32(&udev->gdm_ed, nic_type);

	t_sdu->len = send_len + HCI_HEADER_SIZE;
	t_sdu->callback = cb;
	t_sdu->cb_data = cb_data;

	spin_lock_irqsave(&tx->lock, flags);
	list_add_tail(&t_sdu->list, &tx->sdu_list);
	queue_work(usb_tx_wq, &udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);

	if (no_spc)
		return TX_NO_BUFFER;

	return 0;
}

static int gdm_usb_hci_send(void *priv_dev, void *data, int len,
			    void (*cb)(void *data), void *cb_data)
{
	struct lte_udev *udev = priv_dev;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx *t;
	unsigned long flags;

	if (!udev->usbdev) {
		pr_err("hci send - invalid device\n");
		return -ENODEV;
	}

	t = alloc_tx_struct(len);
	if (t == NULL) {
		pr_err("hci_send - out of memory\n");
		return -ENOMEM;
	}

	memcpy(t->buf, data, len);
	t->callback = cb;
	t->cb_data = cb_data;
	t->len = len;
	t->tx = tx;
	t->is_sdu = 0;

	spin_lock_irqsave(&tx->lock, flags);
	list_add_tail(&t->list, &tx->hci_list);
	queue_work(usb_tx_wq, &udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);

	return 0;
}

static struct gdm_endian *gdm_usb_get_endian(void *priv_dev)
{
	struct lte_udev *udev = priv_dev;

	return &udev->gdm_ed;
}

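/*
 * Probe: allocate the phy_dev/lte_udev pair, wire up the send/receive
 * callbacks, initialise the TX/RX pools, enable autosuspend, select the
 * device endianness and request the MAC address before starting the RX
 * thread.
 */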
static int gdm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	int ret = 0;
	struct phy_dev *phy_dev = NULL;
	struct lte_udev *udev = NULL;
	u16 idVendor, idProduct;
	int bInterfaceNumber;
	struct usb_device *usbdev = interface_to_usbdev(intf);

	bInterfaceNumber = intf->cur_altsetting->desc.bInterfaceNumber;
	idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
	idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);

	pr_info("net vid = 0x%04x pid = 0x%04x\n", idVendor, idProduct);

	if (bInterfaceNumber > NETWORK_INTERFACE) {
		pr_info("not a network device\n");
		return -ENODEV;
	}

	phy_dev = kzalloc(sizeof(struct phy_dev), GFP_KERNEL);
	if (!phy_dev)
		return -ENOMEM;

	udev = kzalloc(sizeof(struct lte_udev), GFP_KERNEL);
	if (!udev) {
		ret = -ENOMEM;
		goto err_udev;
	}

	phy_dev->priv_dev = (void *)udev;
	phy_dev->send_hci_func = gdm_usb_hci_send;
	phy_dev->send_sdu_func = gdm_usb_sdu_send;
	phy_dev->rcv_func = gdm_usb_recv;
	phy_dev->get_endian = gdm_usb_get_endian;

	udev->usbdev = usbdev;
	ret = init_usb(udev);
	if (ret < 0) {
		pr_err("init_usb func failed\n");
		goto err_init_usb;
	}
	udev->intf = intf;

	intf->needs_remote_wakeup = 1;
	usb_enable_autosuspend(usbdev);
	pm_runtime_set_autosuspend_delay(&usbdev->dev, AUTO_SUSPEND_TIMER);

	/* List products that use big endian here; all others default to little endian */
	if (idProduct == PID_GDM7243)
		gdm_set_endian(&udev->gdm_ed, ENDIANNESS_BIG);
	else
		gdm_set_endian(&udev->gdm_ed, ENDIANNESS_LITTLE);

	ret = request_mac_address(udev);
	if (ret < 0) {
		pr_err("request MAC address failed\n");
		goto err_mac_address;
	}

	start_rx_proc(phy_dev);
	usb_get_dev(usbdev);
	usb_set_intfdata(intf, phy_dev);

	return 0;

err_mac_address:
	release_usb(udev);
err_init_usb:
	kfree(udev);
err_udev:
	kfree(phy_dev);

	return ret;
}

static void gdm_usb_disconnect(struct usb_interface *intf)
{
	struct phy_dev *phy_dev;
	struct lte_udev *udev;
	u16 idVendor, idProduct;
	struct usb_device *usbdev;

	usbdev = interface_to_usbdev(intf);

	idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
	idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);

	phy_dev = usb_get_intfdata(intf);

	udev = phy_dev->priv_dev;
	unregister_lte_device(phy_dev);

	release_usb(udev);

	kfree(udev);
	udev = NULL;

	kfree(phy_dev);
	phy_dev = NULL;

	usb_put_dev(usbdev);
}

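/*
 * Runtime/system PM: suspend kills all outstanding RX URBs; resume
 * resubmits receive URBs from the free pool and re-queues the TX work.
 */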
static int gdm_usb_suspend(struct usb_interface *intf, pm_message_t pm_msg)
{
	struct phy_dev *phy_dev;
	struct lte_udev *udev;
	struct rx_cxt *rx;
	struct usb_rx *r;
	struct usb_rx *r_next;
	unsigned long flags;

	phy_dev = usb_get_intfdata(intf);
	udev = phy_dev->priv_dev;
	rx = &udev->rx;
	if (udev->usb_state != PM_NORMAL) {
		pr_err("usb suspend - invalid state\n");
		return -1;
	}

	udev->usb_state = PM_SUSPEND;

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list, rx_submit_list) {
		spin_unlock_irqrestore(&rx->submit_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_lock, flags);
	}
	spin_unlock_irqrestore(&rx->submit_lock, flags);

	return 0;
}

static int gdm_usb_resume(struct usb_interface *intf)
{
	struct phy_dev *phy_dev;
	struct lte_udev *udev;
	struct tx_cxt *tx;
	struct rx_cxt *rx;
	unsigned long flags;
	int issue_count;
	int i;

	phy_dev = usb_get_intfdata(intf);
	udev = phy_dev->priv_dev;
	rx = &udev->rx;

	if (udev->usb_state != PM_SUSPEND) {
		pr_err("usb resume - invalid state\n");
		return -1;
	}
	udev->usb_state = PM_NORMAL;

	spin_lock_irqsave(&rx->rx_lock, flags);
	issue_count = rx->avail_count - MAX_RX_SUBMIT_COUNT;
	spin_unlock_irqrestore(&rx->rx_lock, flags);

	if (issue_count >= 0) {
		for (i = 0; i < issue_count; i++)
			gdm_usb_recv(phy_dev->priv_dev,
				     udev->rx_cb,
				     phy_dev,
				     USB_COMPLETE);
	}

	tx = &udev->tx;
	spin_lock_irqsave(&tx->lock, flags);
	queue_work(usb_tx_wq, &udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);

	return 0;
}

static struct usb_driver gdm_usb_lte_driver = {
	.name = "gdm_lte",
	.probe = gdm_usb_probe,
	.disconnect = gdm_usb_disconnect,
	.id_table = id_table,
	.supports_autosuspend = 1,
	.suspend = gdm_usb_suspend,
	.resume = gdm_usb_resume,
	.reset_resume = gdm_usb_resume,
};

static int __init gdm_usb_lte_init(void)
{
	if (gdm_lte_event_init() < 0) {
		pr_err("error creating event\n");
		return -1;
	}

	usb_tx_wq = create_workqueue("usb_tx_wq");
	if (usb_tx_wq == NULL)
		return -1;

	usb_rx_wq = create_workqueue("usb_rx_wq");
	if (usb_rx_wq == NULL)
		return -1;

	return usb_register(&gdm_usb_lte_driver);
}

static void __exit gdm_usb_lte_exit(void)
{
	gdm_lte_event_exit();

	usb_deregister(&gdm_usb_lte_driver);

	if (usb_tx_wq) {
		flush_workqueue(usb_tx_wq);
		destroy_workqueue(usb_tx_wq);
	}

	if (usb_rx_wq) {
		flush_workqueue(usb_rx_wq);
		destroy_workqueue(usb_rx_wq);
	}
}

module_init(gdm_usb_lte_init);
module_exit(gdm_usb_lte_exit);

MODULE_VERSION(DRIVER_VERSION);
MODULE_DESCRIPTION("GCT LTE USB Device Driver");
MODULE_LICENSE("GPL");