/*
 * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/usb.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/usb/cdc.h>
#include <linux/wait.h>
#include <linux/if_ether.h>
#include <linux/pm_runtime.h>

#include "gdm_usb.h"
#include "gdm_lte.h"
#include "hci.h"
#include "hci_packet.h"
#include "gdm_endian.h"

#define USB_DEVICE_CDC_DATA(vid, pid) \
	.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
		       USB_DEVICE_ID_MATCH_INT_CLASS | \
		       USB_DEVICE_ID_MATCH_INT_SUBCLASS,\
	.idVendor = vid,\
	.idProduct = pid,\
	.bInterfaceClass = USB_CLASS_COMM,\
	.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET

#define USB_DEVICE_MASS_DATA(vid, pid) \
	.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
		       USB_DEVICE_ID_MATCH_INT_INFO,\
	.idVendor = vid,\
	.idProduct = pid,\
	.bInterfaceSubClass = USB_SC_SCSI, \
	.bInterfaceClass = USB_CLASS_MASS_STORAGE,\
	.bInterfaceProtocol = USB_PR_BULK

static const struct usb_device_id id_table[] = {
	{ USB_DEVICE_CDC_DATA(VID_GCT, PID_GDM7240) }, /* GCT GDM7240 */
	{ USB_DEVICE_CDC_DATA(VID_GCT, PID_GDM7243) }, /* GCT GDM7243 */
	{ }
};

MODULE_DEVICE_TABLE(usb, id_table);

static struct workqueue_struct *usb_tx_wq;
static struct workqueue_struct *usb_rx_wq;

static void do_tx(struct work_struct *work);
static void do_rx(struct work_struct *work);

static int gdm_usb_recv(void *priv_dev,
			int (*cb)(void *cb_data,
				  void *data, int len, int context),
			void *cb_data,
			int context);

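/*
 * Ask the modem for its MAC address: build an HCI LTE_GET_INFORMATION
 * request and push it out over the bulk-out endpoint.  The reply arrives
 * asynchronously and is consumed in do_rx()/set_mac_address().
 */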
static int request_mac_address(struct lte_udev *udev)
{
	u8 buf[16] = {0,};
	struct hci_packet *hci = (struct hci_packet *)buf;
	struct usb_device *usbdev = udev->usbdev;
	int actual;
	int ret = -1;

	hci->cmd_evt = gdm_cpu_to_dev16(&udev->gdm_ed, LTE_GET_INFORMATION);
	hci->len = gdm_cpu_to_dev16(&udev->gdm_ed, 1);
	hci->data[0] = MAC_ADDRESS;

	ret = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 2), buf, 5,
			   &actual, 1000);

	udev->request_mac_addr = 1;

	return ret;
}

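/*
 * Allocate a TX descriptor together with its URB and data buffer.  When
 * the requested length is an exact multiple of 512 it is bumped by one
 * byte, presumably so the final bulk packet is always a short packet.
 */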
static struct usb_tx *alloc_tx_struct(int len)
{
	struct usb_tx *t = NULL;
	int ret = 0;

	t = kzalloc(sizeof(*t), GFP_ATOMIC);
	if (!t) {
		ret = -ENOMEM;
		goto out;
	}

	t->urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!(len % 512))
		len++;

	t->buf = kmalloc(len, GFP_ATOMIC);
	if (!t->urb || !t->buf) {
		ret = -ENOMEM;
		goto out;
	}

out:
	if (ret < 0) {
		if (t) {
			usb_free_urb(t->urb);
			kfree(t->buf);
			kfree(t);
		}
		return NULL;
	}

	return t;
}

static struct usb_tx_sdu *alloc_tx_sdu_struct(void)
{
	struct usb_tx_sdu *t_sdu;

	t_sdu = kzalloc(sizeof(*t_sdu), GFP_KERNEL);
	if (!t_sdu)
		return NULL;

	t_sdu->buf = kmalloc(SDU_BUF_SIZE, GFP_KERNEL);
	if (!t_sdu->buf) {
		kfree(t_sdu);
		return NULL;
	}

	return t_sdu;
}

static void free_tx_struct(struct usb_tx *t)
{
	if (t) {
		usb_free_urb(t->urb);
		kfree(t->buf);
		kfree(t);
	}
}

static void free_tx_sdu_struct(struct usb_tx_sdu *t_sdu)
{
	if (t_sdu) {
		kfree(t_sdu->buf);
		kfree(t_sdu);
	}
}

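/*
 * Pool helpers for SDU TX buffers.  Callers serialize access with
 * tx->lock; *no_spc reports whether the free list just ran dry.
 */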
static struct usb_tx_sdu *get_tx_sdu_struct(struct tx_cxt *tx, int *no_spc)
{
	struct usb_tx_sdu *t_sdu;

	if (list_empty(&tx->free_list))
		return NULL;

	t_sdu = list_entry(tx->free_list.next, struct usb_tx_sdu, list);
	list_del(&t_sdu->list);

	tx->avail_count--;

	*no_spc = list_empty(&tx->free_list) ? 1 : 0;

	return t_sdu;
}

static void put_tx_struct(struct tx_cxt *tx, struct usb_tx_sdu *t_sdu)
{
	list_add_tail(&t_sdu->list, &tx->free_list);
	tx->avail_count++;
}

static struct usb_rx *alloc_rx_struct(void)
{
	struct usb_rx *r = NULL;
	int ret = 0;

	r = kmalloc(sizeof(*r), GFP_KERNEL);
	if (!r) {
		ret = -ENOMEM;
		goto out;
	}

	r->urb = usb_alloc_urb(0, GFP_KERNEL);
	r->buf = kmalloc(RX_BUF_SIZE, GFP_KERNEL);
	if (!r->urb || !r->buf) {
		ret = -ENOMEM;
		goto out;
	}
out:

	if (ret < 0) {
		if (r) {
			usb_free_urb(r->urb);
			kfree(r->buf);
			kfree(r);
		}
		return NULL;
	}

	return r;
}

static void free_rx_struct(struct usb_rx *r)
{
	if (r) {
		usb_free_urb(r->urb);
		kfree(r->buf);
		kfree(r);
	}
}

static struct usb_rx *get_rx_struct(struct rx_cxt *rx, int *no_spc)
{
	struct usb_rx *r;
	unsigned long flags;

	spin_lock_irqsave(&rx->rx_lock, flags);

	if (list_empty(&rx->free_list)) {
		spin_unlock_irqrestore(&rx->rx_lock, flags);
		return NULL;
	}

	r = list_entry(rx->free_list.next, struct usb_rx, free_list);
	list_del(&r->free_list);

	rx->avail_count--;

	*no_spc = list_empty(&rx->free_list) ? 1 : 0;

	spin_unlock_irqrestore(&rx->rx_lock, flags);

	return r;
}

static void put_rx_struct(struct rx_cxt *rx, struct usb_rx *r)
{
	unsigned long flags;

	spin_lock_irqsave(&rx->rx_lock, flags);

	list_add_tail(&r->free_list, &rx->free_list);
	rx->avail_count++;

	spin_unlock_irqrestore(&rx->rx_lock, flags);
}

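/*
 * Tear down every queued TX/RX buffer and kill any RX URBs still in
 * flight.  Used both on probe failure and on disconnect.
 */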
static void release_usb(struct lte_udev *udev)
{
	struct rx_cxt *rx = &udev->rx;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx *t, *t_next;
	struct usb_rx *r, *r_next;
	struct usb_tx_sdu *t_sdu, *t_sdu_next;
	unsigned long flags;

	spin_lock_irqsave(&tx->lock, flags);
	list_for_each_entry_safe(t_sdu, t_sdu_next, &tx->sdu_list, list) {
		list_del(&t_sdu->list);
		free_tx_sdu_struct(t_sdu);
	}

	list_for_each_entry_safe(t, t_next, &tx->hci_list, list) {
		list_del(&t->list);
		free_tx_struct(t);
	}

	list_for_each_entry_safe(t_sdu, t_sdu_next, &tx->free_list, list) {
		list_del(&t_sdu->list);
		free_tx_sdu_struct(t_sdu);
	}
	spin_unlock_irqrestore(&tx->lock, flags);

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
				 rx_submit_list) {
		spin_unlock_irqrestore(&rx->submit_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_lock, flags);
	}
	spin_unlock_irqrestore(&rx->submit_lock, flags);

	spin_lock_irqsave(&rx->rx_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->free_list, free_list) {
		list_del(&r->free_list);
		free_rx_struct(r);
	}
	spin_unlock_irqrestore(&rx->rx_lock, flags);

	spin_lock_irqsave(&rx->to_host_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->to_host_list, to_host_list) {
		if (r->index == (void *)udev) {
			list_del(&r->to_host_list);
			free_rx_struct(r);
		}
	}
	spin_unlock_irqrestore(&rx->to_host_lock, flags);
}

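/*
 * Initialize the TX/RX contexts and pre-allocate the buffer pools:
 * MAX_NUM_SDU_BUF SDU buffers and MAX_RX_SUBMIT_COUNT * 2 RX buffers.
 */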
static int init_usb(struct lte_udev *udev)
{
	int ret = 0;
	int i;
	struct tx_cxt *tx = &udev->tx;
	struct rx_cxt *rx = &udev->rx;
	struct usb_tx_sdu *t_sdu = NULL;
	struct usb_rx *r = NULL;

	udev->send_complete = 1;
	udev->tx_stop = 0;
	udev->request_mac_addr = 0;
	udev->usb_state = PM_NORMAL;

	INIT_LIST_HEAD(&tx->sdu_list);
	INIT_LIST_HEAD(&tx->hci_list);
	INIT_LIST_HEAD(&tx->free_list);
	INIT_LIST_HEAD(&rx->rx_submit_list);
	INIT_LIST_HEAD(&rx->free_list);
	INIT_LIST_HEAD(&rx->to_host_list);
	spin_lock_init(&tx->lock);
	spin_lock_init(&rx->rx_lock);
	spin_lock_init(&rx->submit_lock);
	spin_lock_init(&rx->to_host_lock);

	tx->avail_count = 0;
	rx->avail_count = 0;

	udev->rx_cb = NULL;

	for (i = 0; i < MAX_NUM_SDU_BUF; i++) {
		t_sdu = alloc_tx_sdu_struct();
		if (!t_sdu) {
			ret = -ENOMEM;
			goto fail;
		}

		list_add(&t_sdu->list, &tx->free_list);
		tx->avail_count++;
	}

	for (i = 0; i < MAX_RX_SUBMIT_COUNT * 2; i++) {
		r = alloc_rx_struct();
		if (!r) {
			ret = -ENOMEM;
			goto fail;
		}

		list_add(&r->free_list, &rx->free_list);
		rx->avail_count++;
	}
	INIT_DELAYED_WORK(&udev->work_tx, do_tx);
	INIT_DELAYED_WORK(&udev->work_rx, do_rx);
	return 0;
fail:
	release_usb(udev);
	return ret;
}

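/*
 * Consume a MAC_ADDRESS TLV from a GET_INFORMATION reply and register
 * the LTE network device with it.  Returns 1 when the packet was
 * handled here, 0 to let the normal callback path run.
 */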
static int set_mac_address(u8 *data, void *arg)
{
	struct phy_dev *phy_dev = arg;
	struct lte_udev *udev = phy_dev->priv_dev;
	struct tlv *tlv = (struct tlv *)data;
	u8 mac_address[ETH_ALEN] = {0, };

	if (tlv->type == MAC_ADDRESS && udev->request_mac_addr) {
		memcpy(mac_address, tlv->data, tlv->len);

		if (register_lte_device(phy_dev,
					&udev->intf->dev, mac_address) < 0)
			pr_err("register lte device failed\n");

		udev->request_mac_addr = 0;

		return 1;
	}

	return 0;
}

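/*
 * RX work: drain to_host_list, hand each completed buffer to the
 * registered callback (or to set_mac_address() for GET_INFORMATION
 * replies), then recycle the buffer and resubmit a receive.
 */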
static void do_rx(struct work_struct *work)
{
	struct lte_udev *udev =
		container_of(work, struct lte_udev, work_rx.work);
	struct rx_cxt *rx = &udev->rx;
	struct usb_rx *r;
	struct hci_packet *hci;
	struct phy_dev *phy_dev;
	u16 cmd_evt;
	int ret;
	unsigned long flags;

	while (1) {
		spin_lock_irqsave(&rx->to_host_lock, flags);
		if (list_empty(&rx->to_host_list)) {
			spin_unlock_irqrestore(&rx->to_host_lock, flags);
			break;
		}
		r = list_entry(rx->to_host_list.next,
			       struct usb_rx, to_host_list);
		list_del(&r->to_host_list);
		spin_unlock_irqrestore(&rx->to_host_lock, flags);

		phy_dev = r->cb_data;
		udev = phy_dev->priv_dev;
		hci = (struct hci_packet *)r->buf;
		cmd_evt = gdm_dev16_to_cpu(&udev->gdm_ed, hci->cmd_evt);

		switch (cmd_evt) {
		case LTE_GET_INFORMATION_RESULT:
			if (set_mac_address(hci->data, r->cb_data) == 0) {
				ret = r->callback(r->cb_data,
						  r->buf,
						  r->urb->actual_length,
						  KERNEL_THREAD);
			}
			break;

		default:
			if (r->callback) {
				ret = r->callback(r->cb_data,
						  r->buf,
						  r->urb->actual_length,
						  KERNEL_THREAD);

				if (ret == -EAGAIN)
					pr_err("failed to send received data\n");
			}
			break;
		}

		put_rx_struct(rx, r);

		gdm_usb_recv(udev,
			     r->callback,
			     r->cb_data,
			     USB_COMPLETE);
	}
}

static void remove_rx_submit_list(struct usb_rx *r, struct rx_cxt *rx)
{
	unsigned long flags;
	struct usb_rx *r_remove, *r_remove_next;

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_for_each_entry_safe(r_remove, r_remove_next,
				 &rx->rx_submit_list, rx_submit_list) {
		if (r == r_remove) {
			list_del(&r->rx_submit_list);
			break;
		}
	}
	spin_unlock_irqrestore(&rx->submit_lock, flags);
}

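/*
 * RX URB completion: on success queue the buffer on to_host_list and
 * schedule do_rx(); on error just return the buffer to the pool.
 */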
static void gdm_usb_rcv_complete(struct urb *urb)
{
	struct usb_rx *r = urb->context;
	struct rx_cxt *rx = r->rx;
	unsigned long flags;
	struct lte_udev *udev = container_of(r->rx, struct lte_udev, rx);
	struct usb_device *usbdev = udev->usbdev;

	remove_rx_submit_list(r, rx);

	if (!urb->status && r->callback) {
		spin_lock_irqsave(&rx->to_host_lock, flags);
		list_add_tail(&r->to_host_list, &rx->to_host_list);
		queue_work(usb_rx_wq, &udev->work_rx.work);
		spin_unlock_irqrestore(&rx->to_host_lock, flags);
	} else {
		if (urb->status && udev->usb_state == PM_NORMAL)
			dev_err(&urb->dev->dev, "%s: urb status error %d\n",
				__func__, urb->status);

		put_rx_struct(rx, r);
	}

	usb_mark_last_busy(usbdev);
}

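/*
 * Queue one receive: take a buffer from the RX pool, fill a bulk URB
 * for the bulk-in endpoint (0x83) and submit it; GFP_KERNEL is used
 * only when called from thread context.
 */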
static int gdm_usb_recv(void *priv_dev,
			int (*cb)(void *cb_data,
				  void *data, int len, int context),
			void *cb_data,
			int context)
{
	struct lte_udev *udev = priv_dev;
	struct usb_device *usbdev = udev->usbdev;
	struct rx_cxt *rx = &udev->rx;
	struct usb_rx *r;
	int no_spc;
	int ret;
	unsigned long flags;

	if (!udev->usbdev) {
		pr_err("invalid device\n");
		return -ENODEV;
	}

	r = get_rx_struct(rx, &no_spc);
	if (!r) {
		pr_err("Out of Memory\n");
		return -ENOMEM;
	}

	udev->rx_cb = cb;
	r->callback = cb;
	r->cb_data = cb_data;
	r->index = (void *)udev;
	r->rx = rx;

	usb_fill_bulk_urb(r->urb,
			  usbdev,
			  usb_rcvbulkpipe(usbdev, 0x83),
			  r->buf,
			  RX_BUF_SIZE,
			  gdm_usb_rcv_complete,
			  r);

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_add_tail(&r->rx_submit_list, &rx->rx_submit_list);
	spin_unlock_irqrestore(&rx->submit_lock, flags);

	if (context == KERNEL_THREAD)
		ret = usb_submit_urb(r->urb, GFP_KERNEL);
	else
		ret = usb_submit_urb(r->urb, GFP_ATOMIC);

	if (ret) {
		spin_lock_irqsave(&rx->submit_lock, flags);
		list_del(&r->rx_submit_list);
		spin_unlock_irqrestore(&rx->submit_lock, flags);

		pr_err("usb_submit_urb failed (%p)\n", r);
		put_rx_struct(rx, r);
	}

	return ret;
}

static void gdm_usb_send_complete(struct urb *urb)
{
	struct usb_tx *t = urb->context;
	struct tx_cxt *tx = t->tx;
	struct lte_udev *udev = container_of(tx, struct lte_udev, tx);
	unsigned long flags;

	if (urb->status == -ECONNRESET) {
		dev_info(&urb->dev->dev, "CONNRESET\n");
		return;
	}

	if (t->callback)
		t->callback(t->cb_data);

	free_tx_struct(t);

	spin_lock_irqsave(&tx->lock, flags);
	udev->send_complete = 1;
	queue_work(usb_tx_wq, &udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);
}

static int send_tx_packet(struct usb_device *usbdev, struct usb_tx *t, u32 len)
{
	int ret = 0;

	if (!(len % 512))
		len++;

	usb_fill_bulk_urb(t->urb,
			  usbdev,
			  usb_sndbulkpipe(usbdev, 2),
			  t->buf,
			  len,
			  gdm_usb_send_complete,
			  t);

	ret = usb_submit_urb(t->urb, GFP_ATOMIC);

	if (ret)
		dev_err(&usbdev->dev, "usb_submit_urb failed: %d\n",
			ret);

	usb_mark_last_busy(usbdev);

	return ret;
}

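/*
 * Pack as many queued SDUs as fit into a single LTE_TX_MULTI_SDU frame.
 * Each SDU is padded to a 4-byte boundary; aggregation stops at
 * MAX_PACKET_IN_MULTI_SDU packets or MAX_SDU_SIZE bytes.
 */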
static u32 packet_aggregation(struct lte_udev *udev, u8 *send_buf)
{
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx_sdu *t_sdu = NULL;
	struct multi_sdu *multi_sdu = (struct multi_sdu *)send_buf;
	u16 send_len = 0;
	u16 num_packet = 0;
	unsigned long flags;

	multi_sdu->cmd_evt = gdm_cpu_to_dev16(&udev->gdm_ed, LTE_TX_MULTI_SDU);

	while (num_packet < MAX_PACKET_IN_MULTI_SDU) {
		spin_lock_irqsave(&tx->lock, flags);
		if (list_empty(&tx->sdu_list)) {
			spin_unlock_irqrestore(&tx->lock, flags);
			break;
		}

		t_sdu = list_entry(tx->sdu_list.next, struct usb_tx_sdu, list);
		if (send_len + t_sdu->len > MAX_SDU_SIZE) {
			spin_unlock_irqrestore(&tx->lock, flags);
			break;
		}

		list_del(&t_sdu->list);
		spin_unlock_irqrestore(&tx->lock, flags);

		memcpy(multi_sdu->data + send_len, t_sdu->buf, t_sdu->len);

		send_len += (t_sdu->len + 3) & 0xfffc;
		num_packet++;

		if (tx->avail_count > 10)
			t_sdu->callback(t_sdu->cb_data);

		spin_lock_irqsave(&tx->lock, flags);
		put_tx_struct(tx, t_sdu);
		spin_unlock_irqrestore(&tx->lock, flags);
	}

	multi_sdu->len = gdm_cpu_to_dev16(&udev->gdm_ed, send_len);
	multi_sdu->num_packet = gdm_cpu_to_dev16(&udev->gdm_ed, num_packet);

	return send_len + offsetof(struct multi_sdu, data);
}

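/*
 * TX work: with send_complete as a single-URB-in-flight gate, send a
 * pending HCI packet first; otherwise aggregate queued SDUs into one
 * multi-SDU transfer and send that.
 */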
static void do_tx(struct work_struct *work)
{
	struct lte_udev *udev =
		container_of(work, struct lte_udev, work_tx.work);
	struct usb_device *usbdev = udev->usbdev;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx *t = NULL;
	int is_send = 0;
	u32 len = 0;
	unsigned long flags;

	if (!usb_autopm_get_interface(udev->intf))
		usb_autopm_put_interface(udev->intf);

	if (udev->usb_state == PM_SUSPEND)
		return;

	spin_lock_irqsave(&tx->lock, flags);
	if (!udev->send_complete) {
		spin_unlock_irqrestore(&tx->lock, flags);
		return;
	}
	udev->send_complete = 0;

	if (!list_empty(&tx->hci_list)) {
		t = list_entry(tx->hci_list.next, struct usb_tx, list);
		list_del(&t->list);
		len = t->len;
		t->is_sdu = 0;
		is_send = 1;
	} else if (!list_empty(&tx->sdu_list)) {
		if (udev->tx_stop) {
			udev->send_complete = 1;
			spin_unlock_irqrestore(&tx->lock, flags);
			return;
		}

		t = alloc_tx_struct(TX_BUF_SIZE);
		if (!t) {
			spin_unlock_irqrestore(&tx->lock, flags);
			return;
		}
		t->callback = NULL;
		t->tx = tx;
		t->is_sdu = 1;
		is_send = 1;
	}

	if (!is_send) {
		udev->send_complete = 1;
		spin_unlock_irqrestore(&tx->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&tx->lock, flags);

	if (t->is_sdu)
		len = packet_aggregation(udev, t->buf);

	if (send_tx_packet(usbdev, t, len)) {
		pr_err("send_tx_packet failed\n");
		t->callback = NULL;
		gdm_usb_send_complete(t->urb);
	}
}

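/*
 * Queue one SDU (network packet) for transmission.  The Ethernet header
 * is stripped except for ARP frames, the HCI SDU header is prepended,
 * and the TX work is kicked.
 */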
#define SDU_PARAM_LEN 12
static int gdm_usb_sdu_send(void *priv_dev, void *data, int len,
			    unsigned int dftEpsId, unsigned int epsId,
			    void (*cb)(void *data), void *cb_data,
			    int dev_idx, int nic_type)
{
	struct lte_udev *udev = priv_dev;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx_sdu *t_sdu;
	struct sdu *sdu = NULL;
	unsigned long flags;
	int no_spc = 0;
	u16 send_len;

	if (!udev->usbdev) {
		pr_err("sdu send - invalid device\n");
		return TX_NO_DEV;
	}

	spin_lock_irqsave(&tx->lock, flags);
	t_sdu = get_tx_sdu_struct(tx, &no_spc);
	spin_unlock_irqrestore(&tx->lock, flags);

	if (!t_sdu) {
		pr_err("sdu send - free list empty\n");
		return TX_NO_SPC;
	}

	sdu = (struct sdu *)t_sdu->buf;
	sdu->cmd_evt = gdm_cpu_to_dev16(&udev->gdm_ed, LTE_TX_SDU);
	if (nic_type == NIC_TYPE_ARP) {
		send_len = len + SDU_PARAM_LEN;
		memcpy(sdu->data, data, len);
	} else {
		send_len = len - ETH_HLEN;
		send_len += SDU_PARAM_LEN;
		memcpy(sdu->data, data + ETH_HLEN, len - ETH_HLEN);
	}

	sdu->len = gdm_cpu_to_dev16(&udev->gdm_ed, send_len);
	sdu->dftEpsId = gdm_cpu_to_dev32(&udev->gdm_ed, dftEpsId);
	sdu->bearer_ID = gdm_cpu_to_dev32(&udev->gdm_ed, epsId);
	sdu->nic_type = gdm_cpu_to_dev32(&udev->gdm_ed, nic_type);

	t_sdu->len = send_len + HCI_HEADER_SIZE;
	t_sdu->callback = cb;
	t_sdu->cb_data = cb_data;

	spin_lock_irqsave(&tx->lock, flags);
	list_add_tail(&t_sdu->list, &tx->sdu_list);
	queue_work(usb_tx_wq, &udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);

	if (no_spc)
		return TX_NO_BUFFER;

	return 0;
}

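/* Queue a raw HCI control packet on hci_list and kick the TX work. */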
static int gdm_usb_hci_send(void *priv_dev, void *data, int len,
			    void (*cb)(void *data), void *cb_data)
{
	struct lte_udev *udev = priv_dev;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx *t;
	unsigned long flags;

	if (!udev->usbdev) {
		pr_err("hci send - invalid device\n");
		return -ENODEV;
	}

	t = alloc_tx_struct(len);
	if (!t) {
		pr_err("hci_send - out of memory\n");
		return -ENOMEM;
	}

	memcpy(t->buf, data, len);
	t->callback = cb;
	t->cb_data = cb_data;
	t->len = len;
	t->tx = tx;
	t->is_sdu = 0;

	spin_lock_irqsave(&tx->lock, flags);
	list_add_tail(&t->list, &tx->hci_list);
	queue_work(usb_tx_wq, &udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);

	return 0;
}

static struct gdm_endian *gdm_usb_get_endian(void *priv_dev)
{
	struct lte_udev *udev = priv_dev;

	return &udev->gdm_ed;
}

static int gdm_usb_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	int ret = 0;
	struct phy_dev *phy_dev = NULL;
	struct lte_udev *udev = NULL;
	u16 idVendor, idProduct;
	int bInterfaceNumber;
	struct usb_device *usbdev = interface_to_usbdev(intf);

	bInterfaceNumber = intf->cur_altsetting->desc.bInterfaceNumber;
	idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
	idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);

	pr_info("net vid = 0x%04x pid = 0x%04x\n", idVendor, idProduct);

	if (bInterfaceNumber > NETWORK_INTERFACE) {
		pr_info("not a network device\n");
		return -ENODEV;
	}

	phy_dev = kzalloc(sizeof(*phy_dev), GFP_KERNEL);
	if (!phy_dev)
		return -ENOMEM;

	udev = kzalloc(sizeof(*udev), GFP_KERNEL);
	if (!udev) {
		ret = -ENOMEM;
		goto err_udev;
	}

	phy_dev->priv_dev = (void *)udev;
	phy_dev->send_hci_func = gdm_usb_hci_send;
	phy_dev->send_sdu_func = gdm_usb_sdu_send;
	phy_dev->rcv_func = gdm_usb_recv;
	phy_dev->get_endian = gdm_usb_get_endian;

	udev->usbdev = usbdev;
	ret = init_usb(udev);
	if (ret < 0) {
		dev_err(intf->usb_dev, "init_usb func failed\n");
		goto err_init_usb;
	}
	udev->intf = intf;

	intf->needs_remote_wakeup = 1;
	usb_enable_autosuspend(usbdev);
	pm_runtime_set_autosuspend_delay(&usbdev->dev, AUTO_SUSPEND_TIMER);

	/* List up hosts with big endians, otherwise,
	 * defaults to little endian
	 */
	if (idProduct == PID_GDM7243)
		gdm_set_endian(&udev->gdm_ed, ENDIANNESS_BIG);
	else
		gdm_set_endian(&udev->gdm_ed, ENDIANNESS_LITTLE);

	ret = request_mac_address(udev);
	if (ret < 0) {
		dev_err(intf->usb_dev, "request Mac address failed\n");
		goto err_mac_address;
	}

	start_rx_proc(phy_dev);
	usb_get_dev(usbdev);
	usb_set_intfdata(intf, phy_dev);

	return 0;

err_mac_address:
	release_usb(udev);
err_init_usb:
	kfree(udev);
err_udev:
	kfree(phy_dev);

	return ret;
}

static void gdm_usb_disconnect(struct usb_interface *intf)
{
	struct phy_dev *phy_dev;
	struct lte_udev *udev;
	u16 idVendor, idProduct;
	struct usb_device *usbdev;

	usbdev = interface_to_usbdev(intf);

	idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
	idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);

	phy_dev = usb_get_intfdata(intf);

	udev = phy_dev->priv_dev;
	unregister_lte_device(phy_dev);

	release_usb(udev);

	kfree(udev);
	udev = NULL;

	kfree(phy_dev);
	phy_dev = NULL;

	usb_put_dev(usbdev);
}

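/*
 * Runtime/system PM: suspend kills all outstanding RX URBs; resume
 * refills the RX pipeline and reschedules the TX work.
 */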
static int gdm_usb_suspend(struct usb_interface *intf, pm_message_t pm_msg)
{
	struct phy_dev *phy_dev;
	struct lte_udev *udev;
	struct rx_cxt *rx;
	struct usb_rx *r;
	struct usb_rx *r_next;
	unsigned long flags;

	phy_dev = usb_get_intfdata(intf);
	udev = phy_dev->priv_dev;
	rx = &udev->rx;
	if (udev->usb_state != PM_NORMAL) {
		dev_err(intf->usb_dev, "usb suspend - invalid state\n");
		return -1;
	}

	udev->usb_state = PM_SUSPEND;

	spin_lock_irqsave(&rx->submit_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
				 rx_submit_list) {
		spin_unlock_irqrestore(&rx->submit_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_lock, flags);
	}
	spin_unlock_irqrestore(&rx->submit_lock, flags);

	return 0;
}

static int gdm_usb_resume(struct usb_interface *intf)
{
	struct phy_dev *phy_dev;
	struct lte_udev *udev;
	struct tx_cxt *tx;
	struct rx_cxt *rx;
	unsigned long flags;
	int issue_count;
	int i;

	phy_dev = usb_get_intfdata(intf);
	udev = phy_dev->priv_dev;
	rx = &udev->rx;

	if (udev->usb_state != PM_SUSPEND) {
		dev_err(intf->usb_dev, "usb resume - invalid state\n");
		return -1;
	}
	udev->usb_state = PM_NORMAL;

	spin_lock_irqsave(&rx->rx_lock, flags);
	issue_count = rx->avail_count - MAX_RX_SUBMIT_COUNT;
	spin_unlock_irqrestore(&rx->rx_lock, flags);

	if (issue_count >= 0) {
		for (i = 0; i < issue_count; i++)
			gdm_usb_recv(phy_dev->priv_dev,
				     udev->rx_cb,
				     phy_dev,
				     USB_COMPLETE);
	}

	tx = &udev->tx;
	spin_lock_irqsave(&tx->lock, flags);
	queue_work(usb_tx_wq, &udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);

	return 0;
}

static struct usb_driver gdm_usb_lte_driver = {
	.name = "gdm_lte",
	.probe = gdm_usb_probe,
	.disconnect = gdm_usb_disconnect,
	.id_table = id_table,
	.supports_autosuspend = 1,
	.suspend = gdm_usb_suspend,
	.resume = gdm_usb_resume,
	.reset_resume = gdm_usb_resume,
};

static int __init gdm_usb_lte_init(void)
{
	if (gdm_lte_event_init() < 0) {
		pr_err("error creating event\n");
		return -1;
	}

	usb_tx_wq = create_workqueue("usb_tx_wq");
	if (!usb_tx_wq)
		return -1;

	usb_rx_wq = create_workqueue("usb_rx_wq");
	if (!usb_rx_wq)
		return -1;

	return usb_register(&gdm_usb_lte_driver);
}

static void __exit gdm_usb_lte_exit(void)
{
	gdm_lte_event_exit();

	usb_deregister(&gdm_usb_lte_driver);

	if (usb_tx_wq) {
		flush_workqueue(usb_tx_wq);
		destroy_workqueue(usb_tx_wq);
	}

	if (usb_rx_wq) {
		flush_workqueue(usb_rx_wq);
		destroy_workqueue(usb_rx_wq);
	}
}

module_init(gdm_usb_lte_init);
module_exit(gdm_usb_lte_exit);

MODULE_VERSION(DRIVER_VERSION);
MODULE_DESCRIPTION("GCT LTE USB Device Driver");
MODULE_LICENSE("GPL");