/*
 * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/usb.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/usb/cdc.h>
#include <linux/wait.h>
#include <linux/if_ether.h>
#include <linux/pm_runtime.h>

#include "gdm_usb.h"
#include "gdm_lte.h"
#include "hci.h"
#include "hci_packet.h"
#include "gdm_endian.h"

#define USB_DEVICE_CDC_DATA(vid, pid) \
	.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
		USB_DEVICE_ID_MATCH_INT_CLASS | \
		USB_DEVICE_ID_MATCH_INT_SUBCLASS,\
	.idVendor = vid,\
	.idProduct = pid,\
	.bInterfaceClass = USB_CLASS_COMM,\
	.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET

#define USB_DEVICE_MASS_DATA(vid, pid) \
	.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
		USB_DEVICE_ID_MATCH_INT_INFO,\
	.idVendor = vid,\
	.idProduct = pid,\
	.bInterfaceSubClass = USB_SC_SCSI, \
	.bInterfaceClass = USB_CLASS_MASS_STORAGE,\
	.bInterfaceProtocol = USB_PR_BULK

static const struct usb_device_id id_table[] = {
	{ USB_DEVICE_CDC_DATA(VID_GCT, PID_GDM7240) }, /* GCT GDM7240 */
	{ USB_DEVICE_CDC_DATA(VID_GCT, PID_GDM7243) }, /* GCT GDM7243 */
	{ }
};

MODULE_DEVICE_TABLE(usb, id_table);

static struct workqueue_struct *usb_tx_wq;
static struct workqueue_struct *usb_rx_wq;

static void do_tx(struct work_struct *work);
static void do_rx(struct work_struct *work);

static int gdm_usb_recv(void *priv_dev,
			int (*cb)(void *cb_data,
				  void *data, int len, int context),
			void *cb_data,
			int context);

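/*
 * Send an LTE_GET_INFORMATION(MAC_ADDRESS) HCI request on the bulk-out
 * endpoint.  The reply arrives asynchronously and is consumed by
 * set_mac_address() from the RX work (do_rx()).
 */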
static int request_mac_address(struct lte_udev *udev)
{
	u8 buf[16] = {0,};
	struct hci_packet *hci = (struct hci_packet *)buf;
	struct usb_device *usbdev = udev->usbdev;
	int actual;
	int ret = -1;

	hci->cmd_evt = gdm_cpu_to_dev16(&udev->gdm_ed, LTE_GET_INFORMATION);
	hci->len = gdm_cpu_to_dev16(&udev->gdm_ed, 1);
	hci->data[0] = MAC_ADDRESS;

	ret = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 2), buf, 5,
			   &actual, 1000);

	udev->request_mac_addr = 1;

	return ret;
}

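/*
 * Allocate a TX descriptor plus its URB and transfer buffer in atomic
 * context.  The extra byte added when len is a multiple of 512 is
 * presumably there to keep the bulk transfer off an exact
 * wMaxPacketSize boundary.
 */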
static struct usb_tx *alloc_tx_struct(int len)
{
	struct usb_tx *t = NULL;
	int ret = 0;

	t = kzalloc(sizeof(struct usb_tx), GFP_ATOMIC);
	if (!t) {
		ret = -ENOMEM;
		goto out;
	}

	t->urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!(len % 512))
		len++;

	t->buf = kmalloc(len, GFP_ATOMIC);
	if (!t->urb || !t->buf) {
		ret = -ENOMEM;
		goto out;
	}

out:
	if (ret < 0) {
		if (t) {
			usb_free_urb(t->urb);
			kfree(t->buf);
			kfree(t);
		}
		return NULL;
	}

	return t;
}

static struct usb_tx_sdu *alloc_tx_sdu_struct(void)
{
	struct usb_tx_sdu *t_sdu;

	t_sdu = kzalloc(sizeof(struct usb_tx_sdu), GFP_KERNEL);
	if (!t_sdu)
		return NULL;

	t_sdu->buf = kmalloc(SDU_BUF_SIZE, GFP_KERNEL);
	if (!t_sdu->buf) {
		kfree(t_sdu);
		return NULL;
	}

	return t_sdu;
}

static void free_tx_struct(struct usb_tx *t)
{
	if (t) {
		usb_free_urb(t->urb);
		kfree(t->buf);
		kfree(t);
	}
}

static void free_tx_sdu_struct(struct usb_tx_sdu *t_sdu)
{
	if (t_sdu) {
		kfree(t_sdu->buf);
		kfree(t_sdu);
	}
}

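/*
 * TX SDU descriptors come from a pre-allocated free list; *no_spc is
 * set when the caller takes the last one, so the caller can throttle
 * further sends.  The caller must hold tx->lock.
 */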
static struct usb_tx_sdu *get_tx_sdu_struct(struct tx_cxt *tx, int *no_spc)
{
	struct usb_tx_sdu *t_sdu;

	if (list_empty(&tx->free_list))
		return NULL;

	t_sdu = list_entry(tx->free_list.next, struct usb_tx_sdu, list);
	list_del(&t_sdu->list);

	tx->avail_count--;

	*no_spc = list_empty(&tx->free_list) ? 1 : 0;

	return t_sdu;
}

static void put_tx_struct(struct tx_cxt *tx, struct usb_tx_sdu *t_sdu)
{
	list_add_tail(&t_sdu->list, &tx->free_list);
	tx->avail_count++;
}

static struct usb_rx *alloc_rx_struct(void)
{
	struct usb_rx *r = NULL;
	int ret = 0;

	r = kmalloc(sizeof(struct usb_rx), GFP_KERNEL);
	if (!r) {
		ret = -ENOMEM;
		goto out;
	}

	r->urb = usb_alloc_urb(0, GFP_KERNEL);
	r->buf = kmalloc(RX_BUF_SIZE, GFP_KERNEL);
	if (!r->urb || !r->buf) {
		ret = -ENOMEM;
		goto out;
	}
out:

	if (ret < 0) {
		if (r) {
			usb_free_urb(r->urb);
			kfree(r->buf);
			kfree(r);
		}
		return NULL;
	}

	return r;
}

static void free_rx_struct(struct usb_rx *r)
{
	if (r) {
		usb_free_urb(r->urb);
		kfree(r->buf);
		kfree(r);
	}
}

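/*
 * RX descriptors come from a spinlock-protected free list; unlike the
 * TX variant, get/put take rx->rx_lock themselves because they are
 * also called from URB completion context.
 */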
static struct usb_rx *get_rx_struct(struct rx_cxt *rx, int *no_spc)
{
	struct usb_rx *r;
	unsigned long flags;

	spin_lock_irqsave(&rx->rx_lock, flags);

	if (list_empty(&rx->free_list)) {
		spin_unlock_irqrestore(&rx->rx_lock, flags);
		return NULL;
	}

	r = list_entry(rx->free_list.next, struct usb_rx, free_list);
	list_del(&r->free_list);

	rx->avail_count--;

	*no_spc = list_empty(&rx->free_list) ? 1 : 0;

	spin_unlock_irqrestore(&rx->rx_lock, flags);

	return r;
}

static void put_rx_struct(struct rx_cxt *rx, struct usb_rx *r)
{
	unsigned long flags;

	spin_lock_irqsave(&rx->rx_lock, flags);

	list_add_tail(&r->free_list, &rx->free_list);
	rx->avail_count++;

	spin_unlock_irqrestore(&rx->rx_lock, flags);
}

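/*
 * Tear down everything the data path set up: free queued and pooled
 * TX/RX descriptors and kill any receive URBs that are still in flight.
 */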
static void release_usb(struct lte_udev *udev)
{
	struct rx_cxt *rx = &udev->rx;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx *t, *t_next;
	struct usb_rx *r, *r_next;
	struct usb_tx_sdu *t_sdu, *t_sdu_next;
	unsigned long flags;

	spin_lock_irqsave(&tx->lock, flags);
	list_for_each_entry_safe(t_sdu, t_sdu_next, &tx->sdu_list, list) {
		list_del(&t_sdu->list);
		free_tx_sdu_struct(t_sdu);
	}

	list_for_each_entry_safe(t, t_next, &tx->hci_list, list) {
		list_del(&t->list);
		free_tx_struct(t);
	}

	list_for_each_entry_safe(t_sdu, t_sdu_next, &tx->free_list, list) {
		list_del(&t_sdu->list);
		free_tx_sdu_struct(t_sdu);
	}
	spin_unlock_irqrestore(&tx->lock, flags);

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
				 rx_submit_list) {
		spin_unlock_irqrestore(&rx->submit_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_lock, flags);
	}
	spin_unlock_irqrestore(&rx->submit_lock, flags);

	spin_lock_irqsave(&rx->rx_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->free_list, free_list) {
		list_del(&r->free_list);
		free_rx_struct(r);
	}
	spin_unlock_irqrestore(&rx->rx_lock, flags);

	spin_lock_irqsave(&rx->to_host_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->to_host_list, to_host_list) {
		if (r->index == (void *)udev) {
			list_del(&r->to_host_list);
			free_rx_struct(r);
		}
	}
	spin_unlock_irqrestore(&rx->to_host_lock, flags);
}

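/*
 * Initialise the per-device TX/RX contexts and pre-allocate the TX SDU
 * and RX descriptor pools used by the data path.
 */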
static int init_usb(struct lte_udev *udev)
{
	int ret = 0;
	int i;
	struct tx_cxt *tx = &udev->tx;
	struct rx_cxt *rx = &udev->rx;
	struct usb_tx_sdu *t_sdu = NULL;
	struct usb_rx *r = NULL;

	udev->send_complete = 1;
	udev->tx_stop = 0;
	udev->request_mac_addr = 0;
	udev->usb_state = PM_NORMAL;

	INIT_LIST_HEAD(&tx->sdu_list);
	INIT_LIST_HEAD(&tx->hci_list);
	INIT_LIST_HEAD(&tx->free_list);
	INIT_LIST_HEAD(&rx->rx_submit_list);
	INIT_LIST_HEAD(&rx->free_list);
	INIT_LIST_HEAD(&rx->to_host_list);
	spin_lock_init(&tx->lock);
	spin_lock_init(&rx->rx_lock);
	spin_lock_init(&rx->submit_lock);
	spin_lock_init(&rx->to_host_lock);

	tx->avail_count = 0;
	rx->avail_count = 0;

	udev->rx_cb = NULL;

	for (i = 0; i < MAX_NUM_SDU_BUF; i++) {
		t_sdu = alloc_tx_sdu_struct();
		if (t_sdu == NULL) {
			ret = -ENOMEM;
			goto fail;
		}

		list_add(&t_sdu->list, &tx->free_list);
		tx->avail_count++;
	}

	for (i = 0; i < MAX_RX_SUBMIT_COUNT * 2; i++) {
		r = alloc_rx_struct();
		if (r == NULL) {
			ret = -ENOMEM;
			goto fail;
		}

		list_add(&r->free_list, &rx->free_list);
		rx->avail_count++;
	}
	INIT_DELAYED_WORK(&udev->work_tx, do_tx);
	INIT_DELAYED_WORK(&udev->work_rx, do_rx);
	return 0;
fail:
	release_usb(udev);
	return ret;
}

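/*
 * Consume the MAC_ADDRESS TLV that answers request_mac_address() and
 * register the LTE network device with it.  Returns 1 when the packet
 * was handled here, 0 to let the normal RX callback see it.
 */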
static int set_mac_address(u8 *data, void *arg)
{
	struct phy_dev *phy_dev = (struct phy_dev *)arg;
	struct lte_udev *udev = phy_dev->priv_dev;
	struct tlv *tlv = (struct tlv *)data;
	u8 mac_address[ETH_ALEN] = {0, };

	if (tlv->type == MAC_ADDRESS && udev->request_mac_addr) {
		memcpy(mac_address, tlv->data, tlv->len);

		if (register_lte_device(phy_dev,
					&udev->intf->dev, mac_address) < 0)
			pr_err("register lte device failed\n");

		udev->request_mac_addr = 0;

		return 1;
	}

	return 0;
}

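/*
 * RX work item: drain rx->to_host_list, hand each completed buffer to
 * the upper-layer callback (or to set_mac_address() for the MAC reply),
 * recycle the descriptor and resubmit a receive URB.
 */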
static void do_rx(struct work_struct *work)
{
	struct lte_udev *udev =
		container_of(work, struct lte_udev, work_rx.work);
	struct rx_cxt *rx = &udev->rx;
	struct usb_rx *r;
	struct hci_packet *hci;
	struct phy_dev *phy_dev;
	u16 cmd_evt;
	int ret;
	unsigned long flags;

	while (1) {
		spin_lock_irqsave(&rx->to_host_lock, flags);
		if (list_empty(&rx->to_host_list)) {
			spin_unlock_irqrestore(&rx->to_host_lock, flags);
			break;
		}
		r = list_entry(rx->to_host_list.next,
			       struct usb_rx, to_host_list);
		list_del(&r->to_host_list);
		spin_unlock_irqrestore(&rx->to_host_lock, flags);

		phy_dev = (struct phy_dev *)r->cb_data;
		udev = (struct lte_udev *)phy_dev->priv_dev;
		hci = (struct hci_packet *)r->buf;
		cmd_evt = gdm_dev16_to_cpu(&udev->gdm_ed, hci->cmd_evt);

		switch (cmd_evt) {
		case LTE_GET_INFORMATION_RESULT:
			if (set_mac_address(hci->data, r->cb_data) == 0) {
				ret = r->callback(r->cb_data,
						  r->buf,
						  r->urb->actual_length,
						  KERNEL_THREAD);
			}
			break;

		default:
			if (r->callback) {
				ret = r->callback(r->cb_data,
						  r->buf,
						  r->urb->actual_length,
						  KERNEL_THREAD);

				if (ret == -EAGAIN)
					pr_err("failed to send received data\n");
			}
			break;
		}

		put_rx_struct(rx, r);

		gdm_usb_recv(udev,
			     r->callback,
			     r->cb_data,
			     USB_COMPLETE);
	}
}

static void remove_rx_submit_list(struct usb_rx *r, struct rx_cxt *rx)
{
	unsigned long flags;
	struct usb_rx *r_remove, *r_remove_next;

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_for_each_entry_safe(r_remove, r_remove_next,
				 &rx->rx_submit_list, rx_submit_list) {
		if (r == r_remove) {
			list_del(&r->rx_submit_list);
			break;
		}
	}
	spin_unlock_irqrestore(&rx->submit_lock, flags);
}

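/*
 * Bulk-in completion handler: take the URB off the submit list and
 * either queue the buffer for do_rx() or, on error, return it to the
 * free pool.
 */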
static void gdm_usb_rcv_complete(struct urb *urb)
{
	struct usb_rx *r = urb->context;
	struct rx_cxt *rx = r->rx;
	unsigned long flags;
	struct lte_udev *udev = container_of(r->rx, struct lte_udev, rx);
	struct usb_device *usbdev = udev->usbdev;

	remove_rx_submit_list(r, rx);

	if (!urb->status && r->callback) {
		spin_lock_irqsave(&rx->to_host_lock, flags);
		list_add_tail(&r->to_host_list, &rx->to_host_list);
		queue_work(usb_rx_wq, &udev->work_rx.work);
		spin_unlock_irqrestore(&rx->to_host_lock, flags);
	} else {
		if (urb->status && udev->usb_state == PM_NORMAL)
			pr_err("%s: urb status error %d\n",
			       __func__, urb->status);

		put_rx_struct(rx, r);
	}

	usb_mark_last_busy(usbdev);
}

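/*
 * Grab an RX descriptor, fill a bulk-in URB for endpoint 0x83 and
 * submit it; "context" only decides whether the submission may sleep
 * (GFP_KERNEL) or not (GFP_ATOMIC).
 */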
static int gdm_usb_recv(void *priv_dev,
			int (*cb)(void *cb_data,
				  void *data, int len, int context),
			void *cb_data,
			int context)
{
	struct lte_udev *udev = priv_dev;
	struct usb_device *usbdev = udev->usbdev;
	struct rx_cxt *rx = &udev->rx;
	struct usb_rx *r;
	int no_spc;
	int ret;
	unsigned long flags;

	if (!udev->usbdev) {
		pr_err("invalid device\n");
		return -ENODEV;
	}

	r = get_rx_struct(rx, &no_spc);
	if (!r) {
		pr_err("Out of Memory\n");
		return -ENOMEM;
	}

	udev->rx_cb = cb;
	r->callback = cb;
	r->cb_data = cb_data;
	r->index = (void *)udev;
	r->rx = rx;

	usb_fill_bulk_urb(r->urb,
			  usbdev,
			  usb_rcvbulkpipe(usbdev, 0x83),
			  r->buf,
			  RX_BUF_SIZE,
			  gdm_usb_rcv_complete,
			  r);

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_add_tail(&r->rx_submit_list, &rx->rx_submit_list);
	spin_unlock_irqrestore(&rx->submit_lock, flags);

	if (context == KERNEL_THREAD)
		ret = usb_submit_urb(r->urb, GFP_KERNEL);
	else
		ret = usb_submit_urb(r->urb, GFP_ATOMIC);

	if (ret) {
		spin_lock_irqsave(&rx->submit_lock, flags);
		list_del(&r->rx_submit_list);
		spin_unlock_irqrestore(&rx->submit_lock, flags);

		pr_err("usb_submit_urb failed (%p)\n", r);
		put_rx_struct(rx, r);
	}

	return ret;
}

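/*
 * Bulk-out completion handler: run the caller's callback, free the TX
 * descriptor and kick the TX work so the next queued packet goes out.
 */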
static void gdm_usb_send_complete(struct urb *urb)
{
	struct usb_tx *t = urb->context;
	struct tx_cxt *tx = t->tx;
	struct lte_udev *udev = container_of(tx, struct lte_udev, tx);
	unsigned long flags;

	if (urb->status == -ECONNRESET) {
		pr_info("CONNRESET\n");
		return;
	}

	if (t->callback)
		t->callback(t->cb_data);

	free_tx_struct(t);

	spin_lock_irqsave(&tx->lock, flags);
	udev->send_complete = 1;
	queue_work(usb_tx_wq, &udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);
}

static int send_tx_packet(struct usb_device *usbdev, struct usb_tx *t, u32 len)
{
	int ret = 0;

	if (!(len % 512))
		len++;

	usb_fill_bulk_urb(t->urb,
			  usbdev,
			  usb_sndbulkpipe(usbdev, 2),
			  t->buf,
			  len,
			  gdm_usb_send_complete,
			  t);

	ret = usb_submit_urb(t->urb, GFP_ATOMIC);

	if (ret)
		pr_err("usb_submit_urb failed: %d\n", ret);

	usb_mark_last_busy(usbdev);

	return ret;
}

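/*
 * Build an LTE_TX_MULTI_SDU frame: pull as many queued SDUs as fit
 * into MAX_SDU_SIZE (each padded to a 4-byte boundary), record the
 * total length and packet count in the header, and return the number
 * of bytes to send.
 */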
static u32 packet_aggregation(struct lte_udev *udev, u8 *send_buf)
{
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx_sdu *t_sdu = NULL;
	struct multi_sdu *multi_sdu = (struct multi_sdu *)send_buf;
	u16 send_len = 0;
	u16 num_packet = 0;
	unsigned long flags;

	multi_sdu->cmd_evt = gdm_cpu_to_dev16(&udev->gdm_ed, LTE_TX_MULTI_SDU);

	while (num_packet < MAX_PACKET_IN_MULTI_SDU) {
		spin_lock_irqsave(&tx->lock, flags);
		if (list_empty(&tx->sdu_list)) {
			spin_unlock_irqrestore(&tx->lock, flags);
			break;
		}

		t_sdu = list_entry(tx->sdu_list.next, struct usb_tx_sdu, list);
		if (send_len + t_sdu->len > MAX_SDU_SIZE) {
			spin_unlock_irqrestore(&tx->lock, flags);
			break;
		}

		list_del(&t_sdu->list);
		spin_unlock_irqrestore(&tx->lock, flags);

		memcpy(multi_sdu->data + send_len, t_sdu->buf, t_sdu->len);

		send_len += (t_sdu->len + 3) & 0xfffc;
		num_packet++;

		if (tx->avail_count > 10)
			t_sdu->callback(t_sdu->cb_data);

		spin_lock_irqsave(&tx->lock, flags);
		put_tx_struct(tx, t_sdu);
		spin_unlock_irqrestore(&tx->lock, flags);
	}

	multi_sdu->len = gdm_cpu_to_dev16(&udev->gdm_ed, send_len);
	multi_sdu->num_packet = gdm_cpu_to_dev16(&udev->gdm_ed, num_packet);

	return send_len + offsetof(struct multi_sdu, data);
}

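/*
 * TX work item: send at most one URB per invocation.  Pending HCI
 * packets take priority; otherwise queued SDUs are aggregated into a
 * single multi-SDU transfer.  send_complete gates the next submission
 * until the previous URB has finished.
 */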
static void do_tx(struct work_struct *work)
{
	struct lte_udev *udev =
		container_of(work, struct lte_udev, work_tx.work);
	struct usb_device *usbdev = udev->usbdev;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx *t = NULL;
	int is_send = 0;
	u32 len = 0;
	unsigned long flags;

	if (!usb_autopm_get_interface(udev->intf))
		usb_autopm_put_interface(udev->intf);

	if (udev->usb_state == PM_SUSPEND)
		return;

	spin_lock_irqsave(&tx->lock, flags);
	if (!udev->send_complete) {
		spin_unlock_irqrestore(&tx->lock, flags);
		return;
	} else {
		udev->send_complete = 0;
	}

	if (!list_empty(&tx->hci_list)) {
		t = list_entry(tx->hci_list.next, struct usb_tx, list);
		list_del(&t->list);
		len = t->len;
		t->is_sdu = 0;
		is_send = 1;
	} else if (!list_empty(&tx->sdu_list)) {
		if (udev->tx_stop) {
			udev->send_complete = 1;
			spin_unlock_irqrestore(&tx->lock, flags);
			return;
		}

		t = alloc_tx_struct(TX_BUF_SIZE);
		if (!t) {
			/* No memory for an aggregation buffer; try again
			 * on the next kick of the TX work.
			 */
			udev->send_complete = 1;
			spin_unlock_irqrestore(&tx->lock, flags);
			return;
		}

		t->callback = NULL;
		t->tx = tx;
		t->is_sdu = 1;
		is_send = 1;
	}

	if (!is_send) {
		udev->send_complete = 1;
		spin_unlock_irqrestore(&tx->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&tx->lock, flags);

	if (t->is_sdu)
		len = packet_aggregation(udev, t->buf);

	if (send_tx_packet(usbdev, t, len)) {
		pr_err("send_tx_packet failed\n");
		t->callback = NULL;
		gdm_usb_send_complete(t->urb);
	}
}

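/*
 * Queue one data SDU for transmission.  The Ethernet header is dropped
 * for non-ARP traffic and the 12-byte SDU parameter block (EPS IDs and
 * NIC type) is filled in instead; the actual send happens from do_tx().
 */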
#define SDU_PARAM_LEN 12
static int gdm_usb_sdu_send(void *priv_dev, void *data, int len,
			    unsigned int dftEpsId, unsigned int epsId,
			    void (*cb)(void *data), void *cb_data,
			    int dev_idx, int nic_type)
{
	struct lte_udev *udev = priv_dev;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx_sdu *t_sdu;
	struct sdu *sdu = NULL;
	unsigned long flags;
	int no_spc = 0;
	u16 send_len;

	if (!udev->usbdev) {
		pr_err("sdu send - invalid device\n");
		return TX_NO_DEV;
	}

	spin_lock_irqsave(&tx->lock, flags);
	t_sdu = get_tx_sdu_struct(tx, &no_spc);
	spin_unlock_irqrestore(&tx->lock, flags);

	if (t_sdu == NULL) {
		pr_err("sdu send - free list empty\n");
		return TX_NO_SPC;
	}

	sdu = (struct sdu *)t_sdu->buf;
	sdu->cmd_evt = gdm_cpu_to_dev16(&udev->gdm_ed, LTE_TX_SDU);
	if (nic_type == NIC_TYPE_ARP) {
		send_len = len + SDU_PARAM_LEN;
		memcpy(sdu->data, data, len);
	} else {
		send_len = len - ETH_HLEN;
		send_len += SDU_PARAM_LEN;
		memcpy(sdu->data, data + ETH_HLEN, len - ETH_HLEN);
	}

	sdu->len = gdm_cpu_to_dev16(&udev->gdm_ed, send_len);
	sdu->dftEpsId = gdm_cpu_to_dev32(&udev->gdm_ed, dftEpsId);
	sdu->bearer_ID = gdm_cpu_to_dev32(&udev->gdm_ed, epsId);
	sdu->nic_type = gdm_cpu_to_dev32(&udev->gdm_ed, nic_type);

	t_sdu->len = send_len + HCI_HEADER_SIZE;
	t_sdu->callback = cb;
	t_sdu->cb_data = cb_data;

	spin_lock_irqsave(&tx->lock, flags);
	list_add_tail(&t_sdu->list, &tx->sdu_list);
	queue_work(usb_tx_wq, &udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);

	if (no_spc)
		return TX_NO_BUFFER;

	return 0;
}

static int gdm_usb_hci_send(void *priv_dev, void *data, int len,
			    void (*cb)(void *data), void *cb_data)
{
	struct lte_udev *udev = priv_dev;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx *t;
	unsigned long flags;

	if (!udev->usbdev) {
		pr_err("hci send - invalid device\n");
		return -ENODEV;
	}

	t = alloc_tx_struct(len);
	if (t == NULL) {
		pr_err("hci_send - out of memory\n");
		return -ENOMEM;
	}

	memcpy(t->buf, data, len);
	t->callback = cb;
	t->cb_data = cb_data;
	t->len = len;
	t->tx = tx;
	t->is_sdu = 0;

	spin_lock_irqsave(&tx->lock, flags);
	list_add_tail(&t->list, &tx->hci_list);
	queue_work(usb_tx_wq, &udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);

	return 0;
}

static struct gdm_endian *gdm_usb_get_endian(void *priv_dev)
{
	struct lte_udev *udev = priv_dev;

	return &udev->gdm_ed;
}

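/*
 * Probe: allocate the phy_dev/lte_udev pair, wire up the send/receive
 * hooks, set up autosuspend, pick the device endianness and kick off
 * the MAC address request and the RX path.
 */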
static int gdm_usb_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	int ret = 0;
	struct phy_dev *phy_dev = NULL;
	struct lte_udev *udev = NULL;
	u16 idVendor, idProduct;
	int bInterfaceNumber;
	struct usb_device *usbdev = interface_to_usbdev(intf);

	bInterfaceNumber = intf->cur_altsetting->desc.bInterfaceNumber;
	idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
	idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);

	pr_info("net vid = 0x%04x pid = 0x%04x\n", idVendor, idProduct);

	if (bInterfaceNumber > NETWORK_INTERFACE) {
		pr_info("not a network device\n");
		return -ENODEV;
	}

	phy_dev = kzalloc(sizeof(struct phy_dev), GFP_KERNEL);
	if (!phy_dev)
		return -ENOMEM;

	udev = kzalloc(sizeof(struct lte_udev), GFP_KERNEL);
	if (!udev) {
		ret = -ENOMEM;
		goto err_udev;
	}

	phy_dev->priv_dev = (void *)udev;
	phy_dev->send_hci_func = gdm_usb_hci_send;
	phy_dev->send_sdu_func = gdm_usb_sdu_send;
	phy_dev->rcv_func = gdm_usb_recv;
	phy_dev->get_endian = gdm_usb_get_endian;

	udev->usbdev = usbdev;
	ret = init_usb(udev);
	if (ret < 0) {
		pr_err("init_usb func failed\n");
		goto err_init_usb;
	}
	udev->intf = intf;

	intf->needs_remote_wakeup = 1;
	usb_enable_autosuspend(usbdev);
	pm_runtime_set_autosuspend_delay(&usbdev->dev, AUTO_SUSPEND_TIMER);

	/* List devices that use big endian here; everything else
	 * defaults to little endian.
	 */
	if (idProduct == PID_GDM7243)
		gdm_set_endian(&udev->gdm_ed, ENDIANNESS_BIG);
	else
		gdm_set_endian(&udev->gdm_ed, ENDIANNESS_LITTLE);

	ret = request_mac_address(udev);
	if (ret < 0) {
		pr_err("request Mac address failed\n");
		goto err_mac_address;
	}

	start_rx_proc(phy_dev);
	usb_get_dev(usbdev);
	usb_set_intfdata(intf, phy_dev);

	return 0;

err_mac_address:
	release_usb(udev);
err_init_usb:
	kfree(udev);
err_udev:
	kfree(phy_dev);

	return ret;
}

static void gdm_usb_disconnect(struct usb_interface *intf)
{
	struct phy_dev *phy_dev;
	struct lte_udev *udev;
	u16 idVendor, idProduct;
	struct usb_device *usbdev;

	usbdev = interface_to_usbdev(intf);

	idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
	idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);

	phy_dev = usb_get_intfdata(intf);

	udev = phy_dev->priv_dev;
	unregister_lte_device(phy_dev);

	release_usb(udev);

	kfree(udev);
	udev = NULL;

	kfree(phy_dev);
	phy_dev = NULL;

	usb_put_dev(usbdev);
}

static int gdm_usb_suspend(struct usb_interface *intf, pm_message_t pm_msg)
{
	struct phy_dev *phy_dev;
	struct lte_udev *udev;
	struct rx_cxt *rx;
	struct usb_rx *r;
	struct usb_rx *r_next;
	unsigned long flags;

	phy_dev = usb_get_intfdata(intf);
	udev = phy_dev->priv_dev;
	rx = &udev->rx;
	if (udev->usb_state != PM_NORMAL) {
		pr_err("usb suspend - invalid state\n");
		return -1;
	}

	udev->usb_state = PM_SUSPEND;

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
				 rx_submit_list) {
		spin_unlock_irqrestore(&rx->submit_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_lock, flags);
	}
	spin_unlock_irqrestore(&rx->submit_lock, flags);

	return 0;
}

static int gdm_usb_resume(struct usb_interface *intf)
{
	struct phy_dev *phy_dev;
	struct lte_udev *udev;
	struct tx_cxt *tx;
	struct rx_cxt *rx;
	unsigned long flags;
	int issue_count;
	int i;

	phy_dev = usb_get_intfdata(intf);
	udev = phy_dev->priv_dev;
	rx = &udev->rx;

	if (udev->usb_state != PM_SUSPEND) {
		pr_err("usb resume - invalid state\n");
		return -1;
	}
	udev->usb_state = PM_NORMAL;

	spin_lock_irqsave(&rx->rx_lock, flags);
	issue_count = rx->avail_count - MAX_RX_SUBMIT_COUNT;
	spin_unlock_irqrestore(&rx->rx_lock, flags);

	if (issue_count >= 0) {
		for (i = 0; i < issue_count; i++)
			gdm_usb_recv(phy_dev->priv_dev,
				     udev->rx_cb,
				     phy_dev,
				     USB_COMPLETE);
	}

	tx = &udev->tx;
	spin_lock_irqsave(&tx->lock, flags);
	queue_work(usb_tx_wq, &udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);

	return 0;
}

static struct usb_driver gdm_usb_lte_driver = {
	.name = "gdm_lte",
	.probe = gdm_usb_probe,
	.disconnect = gdm_usb_disconnect,
	.id_table = id_table,
	.supports_autosuspend = 1,
	.suspend = gdm_usb_suspend,
	.resume = gdm_usb_resume,
	.reset_resume = gdm_usb_resume,
};

static int __init gdm_usb_lte_init(void)
{
	if (gdm_lte_event_init() < 0) {
		pr_err("error creating event\n");
		return -1;
	}

	usb_tx_wq = create_workqueue("usb_tx_wq");
	if (usb_tx_wq == NULL)
		return -1;

	usb_rx_wq = create_workqueue("usb_rx_wq");
	if (usb_rx_wq == NULL)
		return -1;

	return usb_register(&gdm_usb_lte_driver);
}

static void __exit gdm_usb_lte_exit(void)
{
	gdm_lte_event_exit();

	usb_deregister(&gdm_usb_lte_driver);

	if (usb_tx_wq) {
		flush_workqueue(usb_tx_wq);
		destroy_workqueue(usb_tx_wq);
	}

	if (usb_rx_wq) {
		flush_workqueue(usb_rx_wq);
		destroy_workqueue(usb_rx_wq);
	}
}

module_init(gdm_usb_lte_init);
module_exit(gdm_usb_lte_exit);

MODULE_VERSION(DRIVER_VERSION);
MODULE_DESCRIPTION("GCT LTE USB Device Driver");
MODULE_LICENSE("GPL");