/*
 * f_rmnet.c -- RmNet function driver
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
 * Copyright (C) 2008 Nokia Corporation
 * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/termios.h>
#include <linux/debugfs.h>

#include <mach/msm_smd.h>
#include <linux/usb/cdc.h>
#include <linux/usb/composite.h>
#include <linux/usb/ch9.h>

#include "gadget_chips.h"

#ifndef CONFIG_MSM_SMD
#define CONFIG_RMNET_SMD_CTL_CHANNEL ""
#define CONFIG_RMNET_SMD_DATA_CHANNEL ""
#endif

static char *rmnet_ctl_ch = CONFIG_RMNET_SMD_CTL_CHANNEL;
module_param(rmnet_ctl_ch, charp, S_IRUGO);
MODULE_PARM_DESC(rmnet_ctl_ch, "RmNet control SMD channel");

static char *rmnet_data_ch = CONFIG_RMNET_SMD_DATA_CHANNEL;
module_param(rmnet_data_ch, charp, S_IRUGO);
MODULE_PARM_DESC(rmnet_data_ch, "RmNet data SMD channel");

#define RMNET_SMD_ACM_CTRL_DTR		(1 << 0)

#define RMNET_SMD_NOTIFY_INTERVAL	5
#define RMNET_SMD_MAX_NOTIFY_SIZE	sizeof(struct usb_cdc_notification)

#define QMI_REQ_MAX			4
#define QMI_REQ_SIZE			2048
#define QMI_RESP_MAX			8
#define QMI_RESP_SIZE			2048

#define RMNET_RX_REQ_MAX		8
#define RMNET_RX_REQ_SIZE		2048
#define RMNET_TX_REQ_MAX		8
#define RMNET_TX_REQ_SIZE		2048

#define RMNET_TXN_MAX			2048

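/*
 * All QMI messages and data packets are staged in fixed 2048-byte
 * buffers.  RMNET_TXN_MAX is also programmed as the OUT request
 * length, so a completed host packet never exceeds one staging buffer
 * and can be pushed into SMD with a single smd_write().
 */
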
/* QMI requests & responses buffer */
struct qmi_buf {
	void *buf;
	int len;
	struct list_head list;
};

/* Control & data SMD channel private data */
struct rmnet_smd_ch_info {
	struct smd_channel *ch;
	struct tasklet_struct tx_tlet;
	struct tasklet_struct rx_tlet;
#define CH_OPENED 0
	unsigned long flags;
	/* pending rx packet length */
	atomic_t rx_pkt;
	/* wait for smd open event */
	wait_queue_head_t wait;
};

struct rmnet_smd_dev {
	struct usb_function function;
	struct usb_composite_dev *cdev;

	struct usb_ep *epout;
	struct usb_ep *epin;
	struct usb_ep *epnotify;
	struct usb_request *notify_req;

	u8 ifc_id;
	/* QMI lists */
	struct list_head qmi_req_pool;
	struct list_head qmi_resp_pool;
	struct list_head qmi_req_q;
	struct list_head qmi_resp_q;
	/* Tx/Rx lists */
	struct list_head tx_idle;
	struct list_head rx_idle;
	struct list_head rx_queue;

	spinlock_t lock;
	atomic_t online;
	atomic_t notify_count;

	struct platform_driver pdrv;
	u8 is_pdrv_used;
	struct rmnet_smd_ch_info smd_ctl;
	struct rmnet_smd_ch_info smd_data;

	struct workqueue_struct *wq;
	struct work_struct connect_work;
	struct work_struct disconnect_work;

	unsigned long dpkts_to_host;
	unsigned long dpkts_from_modem;
	unsigned long dpkts_from_host;
	unsigned long dpkts_to_modem;

	unsigned long cpkts_to_host;
	unsigned long cpkts_from_modem;
	unsigned long cpkts_from_host;
	unsigned long cpkts_to_modem;
};

static struct rmnet_smd_dev *rmnet_smd;

static struct usb_interface_descriptor rmnet_smd_interface_desc = {
	.bLength =		USB_DT_INTERFACE_SIZE,
	.bDescriptorType =	USB_DT_INTERFACE,
	/* .bInterfaceNumber = DYNAMIC */
	.bNumEndpoints =	3,
	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
	.bInterfaceSubClass =	USB_CLASS_VENDOR_SPEC,
	.bInterfaceProtocol =	USB_CLASS_VENDOR_SPEC,
	/* .iInterface = DYNAMIC */
};

/* Full speed support */
static struct usb_endpoint_descriptor rmnet_smd_fs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	__constant_cpu_to_le16(
					RMNET_SMD_MAX_NOTIFY_SIZE),
	.bInterval =		1 << RMNET_SMD_NOTIFY_INTERVAL,
};

static struct usb_endpoint_descriptor rmnet_smd_fs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(64),
};

static struct usb_endpoint_descriptor rmnet_smd_fs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(64),
};

static struct usb_descriptor_header *rmnet_smd_fs_function[] = {
	(struct usb_descriptor_header *) &rmnet_smd_interface_desc,
	(struct usb_descriptor_header *) &rmnet_smd_fs_notify_desc,
	(struct usb_descriptor_header *) &rmnet_smd_fs_in_desc,
	(struct usb_descriptor_header *) &rmnet_smd_fs_out_desc,
	NULL,
};

/* High speed support */
static struct usb_endpoint_descriptor rmnet_smd_hs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	__constant_cpu_to_le16(
					RMNET_SMD_MAX_NOTIFY_SIZE),
	.bInterval =		RMNET_SMD_NOTIFY_INTERVAL + 4,
};

static struct usb_endpoint_descriptor rmnet_smd_hs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor rmnet_smd_hs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(512),
};

static struct usb_descriptor_header *rmnet_smd_hs_function[] = {
	(struct usb_descriptor_header *) &rmnet_smd_interface_desc,
	(struct usb_descriptor_header *) &rmnet_smd_hs_notify_desc,
	(struct usb_descriptor_header *) &rmnet_smd_hs_in_desc,
	(struct usb_descriptor_header *) &rmnet_smd_hs_out_desc,
	NULL,
};

/* String descriptors */

static struct usb_string rmnet_smd_string_defs[] = {
	[0].s = "QMI RmNet",
	{  } /* end of list */
};

static struct usb_gadget_strings rmnet_smd_string_table = {
	.language =	0x0409,	/* en-us */
	.strings =	rmnet_smd_string_defs,
};

static struct usb_gadget_strings *rmnet_smd_strings[] = {
	&rmnet_smd_string_table,
	NULL,
};

static struct qmi_buf *
rmnet_smd_alloc_qmi(unsigned len, gfp_t kmalloc_flags)
{
	struct qmi_buf *qmi;

	qmi = kmalloc(sizeof(struct qmi_buf), kmalloc_flags);
	if (qmi != NULL) {
		qmi->buf = kmalloc(len, kmalloc_flags);
		if (qmi->buf == NULL) {
			kfree(qmi);
			qmi = NULL;
		}
	}

	return qmi ? qmi : ERR_PTR(-ENOMEM);
}

static void rmnet_smd_free_qmi(struct qmi_buf *qmi)
{
	kfree(qmi->buf);
	kfree(qmi);
}

/*
 * Allocate a usb_request and its buffer.  Returns a pointer to the
 * usb_request or an error code if there is an error.
 */
static struct usb_request *
rmnet_smd_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
{
	struct usb_request *req;

	req = usb_ep_alloc_request(ep, kmalloc_flags);

	if (req != NULL) {
		req->length = len;
		req->buf = kmalloc(len, kmalloc_flags);
		if (req->buf == NULL) {
			usb_ep_free_request(ep, req);
			req = NULL;
		}
	}

	return req ? req : ERR_PTR(-ENOMEM);
}

/*
 * Free a usb_request and its buffer.
 */
static void rmnet_smd_free_req(struct usb_ep *ep, struct usb_request *req)
{
	kfree(req->buf);
	usb_ep_free_request(ep, req);
}

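/*
 * notify_count tracks QMI responses the host has not yet fetched.
 * Only one interrupt request is ever in flight: the completion
 * handler below re-queues it until the count drops to zero, so a
 * burst of responses collapses into back-to-back notifications.
 */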
static void rmnet_smd_notify_complete(struct usb_ep *ep,
		struct usb_request *req)
{
	struct rmnet_smd_dev *dev = req->context;
	struct usb_composite_dev *cdev = dev->cdev;
	int status = req->status;

	switch (status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		atomic_set(&dev->notify_count, 0);
		break;
	default:
		ERROR(cdev, "rmnet notify ep error %d\n", status);
		/* FALLTHROUGH */
	case 0:
		if (ep != dev->epnotify)
			break;

		/* handle multiple pending QMI_RESPONSE_AVAILABLE
		 * notifications by resending until we're done
		 */
		if (atomic_dec_and_test(&dev->notify_count))
			break;

		status = usb_ep_queue(dev->epnotify, req, GFP_ATOMIC);
		if (status) {
			atomic_dec(&dev->notify_count);
			ERROR(cdev, "rmnet notify ep enqueue error %d\n",
					status);
		}
		break;
	}
}

static void qmi_smd_response_available(struct rmnet_smd_dev *dev)
{
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req = dev->notify_req;
	struct usb_cdc_notification *event = req->buf;
	int status;

	/* Response will be sent later */
	if (atomic_inc_return(&dev->notify_count) != 1)
		return;

	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
			| USB_RECIP_INTERFACE;
	event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
	event->wValue = cpu_to_le16(0);
	event->wIndex = cpu_to_le16(dev->ifc_id);
	event->wLength = cpu_to_le16(0);

	status = usb_ep_queue(dev->epnotify, dev->notify_req, GFP_ATOMIC);
	if (status < 0) {
		atomic_dec(&dev->notify_count);
		ERROR(cdev, "rmnet notify ep enqueue error %d\n", status);
	}
}

/* TODO
 * handle modem restart events
 */
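/*
 * SMD event callback.  rx_pkt caches the length of a host packet that
 * could not be written because the SMD FIFO was full; once enough
 * write room opens up, the rx tasklet is scheduled to retry it.  Any
 * readable data simply schedules the tx tasklet.
 */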
static void rmnet_smd_event_notify(void *priv, unsigned event)
{
	struct rmnet_smd_ch_info *smd_info = priv;
	int len = atomic_read(&smd_info->rx_pkt);
	struct rmnet_smd_dev *dev =
		(struct rmnet_smd_dev *) smd_info->tx_tlet.data;

	switch (event) {
	case SMD_EVENT_DATA: {
		if (!atomic_read(&dev->online))
			break;
		if (len && (smd_write_avail(smd_info->ch) >= len))
			tasklet_schedule(&smd_info->rx_tlet);

		if (smd_read_avail(smd_info->ch))
			tasklet_schedule(&smd_info->tx_tlet);

		break;
	}
	case SMD_EVENT_OPEN:
		/* usb endpoints are not enabled until smd channels
		 * are opened.  wake up the worker thread to continue
		 * connection processing
		 */
		set_bit(CH_OPENED, &smd_info->flags);
		wake_up(&smd_info->wait);
		break;
	case SMD_EVENT_CLOSE:
		/* We should never get here: the flags are reset after
		 * the smd channels are closed from the disconnect work.
		 */
		clear_bit(CH_OPENED, &smd_info->flags);
		break;
	}
}

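/*
 * Tasklet naming follows the USB direction: the "tx" tasklets drain
 * SMD and move packets toward the host, while the "rx" tasklets drain
 * packets received from the host into SMD.
 */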
static void rmnet_control_tx_tlet(unsigned long arg)
{
	struct rmnet_smd_dev *dev = (struct rmnet_smd_dev *) arg;
	struct usb_composite_dev *cdev = dev->cdev;
	struct qmi_buf *qmi_resp;
	int sz;
	unsigned long flags;

	while (1) {
		sz = smd_cur_packet_size(dev->smd_ctl.ch);
		if (sz == 0)
			break;
		if (smd_read_avail(dev->smd_ctl.ch) < sz)
			break;

		spin_lock_irqsave(&dev->lock, flags);
		if (list_empty(&dev->qmi_resp_pool)) {
			ERROR(cdev, "rmnet QMI Tx buffers full\n");
			spin_unlock_irqrestore(&dev->lock, flags);
			break;
		}
		qmi_resp = list_first_entry(&dev->qmi_resp_pool,
				struct qmi_buf, list);
		list_del(&qmi_resp->list);
		spin_unlock_irqrestore(&dev->lock, flags);

		qmi_resp->len = smd_read(dev->smd_ctl.ch, qmi_resp->buf, sz);

		spin_lock_irqsave(&dev->lock, flags);
		dev->cpkts_from_modem++;
		list_add_tail(&qmi_resp->list, &dev->qmi_resp_q);
		spin_unlock_irqrestore(&dev->lock, flags);

		qmi_smd_response_available(dev);
	}
}

static void rmnet_control_rx_tlet(unsigned long arg)
{
	struct rmnet_smd_dev *dev = (struct rmnet_smd_dev *) arg;
	struct usb_composite_dev *cdev = dev->cdev;
	struct qmi_buf *qmi_req;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	while (1) {
		if (list_empty(&dev->qmi_req_q)) {
			atomic_set(&dev->smd_ctl.rx_pkt, 0);
			break;
		}
		qmi_req = list_first_entry(&dev->qmi_req_q,
				struct qmi_buf, list);
		if (smd_write_avail(dev->smd_ctl.ch) < qmi_req->len) {
			atomic_set(&dev->smd_ctl.rx_pkt, qmi_req->len);
			DBG(cdev, "rmnet control smd channel full\n");
			break;
		}

		list_del(&qmi_req->list);
		dev->cpkts_from_host++;
		spin_unlock_irqrestore(&dev->lock, flags);
		ret = smd_write(dev->smd_ctl.ch, qmi_req->buf, qmi_req->len);
		spin_lock_irqsave(&dev->lock, flags);
		if (ret != qmi_req->len) {
			ERROR(cdev, "rmnet control smd write failed\n");
			/* return the buffer to the pool; this packet
			 * is dropped
			 */
			list_add_tail(&qmi_req->list, &dev->qmi_req_pool);
			break;
		}
		dev->cpkts_to_modem++;
		list_add_tail(&qmi_req->list, &dev->qmi_req_pool);
	}
	spin_unlock_irqrestore(&dev->lock, flags);
}

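/*
 * Completion handler for the data stage of SEND_ENCAPSULATED_COMMAND:
 * write the QMI request straight into SMD when there is room and
 * nothing is already pending; otherwise park it on qmi_req_q for the
 * control rx tasklet to retry.
 */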
static void rmnet_smd_command_complete(struct usb_ep *ep,
		struct usb_request *req)
{
	struct rmnet_smd_dev *dev = req->context;
	struct usb_composite_dev *cdev = dev->cdev;
	struct qmi_buf *qmi_req;
	int ret;

	if (req->status < 0) {
		ERROR(cdev, "rmnet command error %d\n", req->status);
		return;
	}

	spin_lock(&dev->lock);
	dev->cpkts_from_host++;
	/* no pending control rx packet */
	if (!atomic_read(&dev->smd_ctl.rx_pkt)) {
		if (smd_write_avail(dev->smd_ctl.ch) < req->actual) {
			atomic_set(&dev->smd_ctl.rx_pkt, req->actual);
			goto queue_req;
		}
		spin_unlock(&dev->lock);
		ret = smd_write(dev->smd_ctl.ch, req->buf, req->actual);
		/* This should never happen */
		if (ret != req->actual)
			ERROR(cdev, "rmnet control smd write failed\n");
		spin_lock(&dev->lock);
		dev->cpkts_to_modem++;
		spin_unlock(&dev->lock);
		return;
	}
queue_req:
	if (list_empty(&dev->qmi_req_pool)) {
		spin_unlock(&dev->lock);
		ERROR(cdev, "rmnet QMI pool is empty\n");
		return;
	}

	qmi_req = list_first_entry(&dev->qmi_req_pool, struct qmi_buf, list);
	list_del(&qmi_req->list);
	spin_unlock(&dev->lock);
	memcpy(qmi_req->buf, req->buf, req->actual);
	qmi_req->len = req->actual;
	spin_lock(&dev->lock);
	list_add_tail(&qmi_req->list, &dev->qmi_req_q);
	spin_unlock(&dev->lock);
}

static void rmnet_txcommand_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct rmnet_smd_dev *dev = req->context;

	spin_lock(&dev->lock);
	dev->cpkts_to_host++;
	spin_unlock(&dev->lock);
}

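/*
 * ep0 class-request handler.  QMI is tunneled over the CDC
 * encapsulated command/response mechanism; SET_CONTROL_LINE_STATE is
 * also honoured so the host can toggle DTR on the control channel.
 */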
static int
rmnet_smd_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct rmnet_smd_dev *dev = container_of(f, struct rmnet_smd_dev,
			function);
	struct usb_composite_dev *cdev = f->config->cdev;
	struct usb_request *req = cdev->req;
	int ret = -EOPNOTSUPP;
	u16 w_index = le16_to_cpu(ctrl->wIndex);
	u16 w_value = le16_to_cpu(ctrl->wValue);
	u16 w_length = le16_to_cpu(ctrl->wLength);
	struct qmi_buf *resp;
	int schedule = 0;

	if (!atomic_read(&dev->online))
		return -ENOTCONN;

	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
		ret = w_length;
		req->complete = rmnet_smd_command_complete;
		req->context = dev;
		break;

	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
		if (w_value)
			goto invalid;
		else {
			spin_lock(&dev->lock);
			if (list_empty(&dev->qmi_resp_q)) {
				INFO(cdev, "qmi resp empty"
					" req%02x.%02x v%04x i%04x l%d\n",
					ctrl->bRequestType, ctrl->bRequest,
					w_value, w_index, w_length);
				spin_unlock(&dev->lock);
				goto invalid;
			}
			resp = list_first_entry(&dev->qmi_resp_q,
					struct qmi_buf, list);
			list_del(&resp->list);
			spin_unlock(&dev->lock);
			memcpy(req->buf, resp->buf, resp->len);
			ret = resp->len;
			spin_lock(&dev->lock);

			if (list_empty(&dev->qmi_resp_pool))
				schedule = 1;
			list_add_tail(&resp->list, &dev->qmi_resp_pool);

			if (schedule)
				tasklet_schedule(&dev->smd_ctl.tx_tlet);
			spin_unlock(&dev->lock);
			req->complete = rmnet_txcommand_complete;
			req->context = dev;
		}
		break;
	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
		/* This is a workaround for RmNet and is borrowed from the
		 * CDC/ACM standard.  The host driver will issue the above
		 * ACM standard request to the RmNet interface in the
		 * following scenario: once the network adapter is disabled
		 * from device manager, the above request will be sent from
		 * the qcusbnet host driver, with DTR being '0'.  Once the
		 * network adapter is enabled from device manager (or during
		 * enumeration), the request will be sent with DTR being '1'.
		 */
		if (w_value & RMNET_SMD_ACM_CTRL_DTR)
			ret = smd_tiocmset(dev->smd_ctl.ch, TIOCM_DTR, 0);
		else
			ret = smd_tiocmset(dev->smd_ctl.ch, 0, TIOCM_DTR);

		break;
	default:

invalid:
		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	}

	/* respond with data transfer or status phase? */
	if (ret >= 0) {
		VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
		req->zero = 0;
		req->length = ret;
		ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		if (ret < 0)
			ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret);
	}

	return ret;
}

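/* (Re)queue all idle OUT requests so the host can keep sending data. */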
static void rmnet_smd_start_rx(struct rmnet_smd_dev *dev)
{
	struct usb_composite_dev *cdev = dev->cdev;
	int status;
	struct usb_request *req;
	struct list_head *pool = &dev->rx_idle;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	while (!list_empty(pool)) {
		req = list_entry(pool->next, struct usb_request, list);
		list_del(&req->list);

		spin_unlock_irqrestore(&dev->lock, flags);
		status = usb_ep_queue(dev->epout, req, GFP_ATOMIC);
		spin_lock_irqsave(&dev->lock, flags);

		if (status) {
			ERROR(cdev, "rmnet data rx enqueue err %d\n", status);
			list_add_tail(&req->list, pool);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->lock, flags);
}

static void rmnet_data_tx_tlet(unsigned long arg)
{
	struct rmnet_smd_dev *dev = (struct rmnet_smd_dev *) arg;
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req;
	int status;
	int sz;
	unsigned long flags;

	while (1) {
		sz = smd_cur_packet_size(dev->smd_data.ch);
		if (sz == 0)
			break;
		if (smd_read_avail(dev->smd_data.ch) < sz)
			break;

		spin_lock_irqsave(&dev->lock, flags);
		if (list_empty(&dev->tx_idle)) {
			spin_unlock_irqrestore(&dev->lock, flags);
			DBG(cdev, "rmnet data Tx buffers full\n");
			break;
		}
		req = list_first_entry(&dev->tx_idle, struct usb_request,
				list);
		list_del(&req->list);
		spin_unlock_irqrestore(&dev->lock, flags);

		req->length = smd_read(dev->smd_data.ch, req->buf, sz);
		status = usb_ep_queue(dev->epin, req, GFP_ATOMIC);
		if (status) {
			ERROR(cdev, "rmnet tx data enqueue err %d\n", status);
			spin_lock_irqsave(&dev->lock, flags);
			list_add_tail(&req->list, &dev->tx_idle);
			spin_unlock_irqrestore(&dev->lock, flags);
			break;
		}
		spin_lock_irqsave(&dev->lock, flags);
		dev->dpkts_from_modem++;
		spin_unlock_irqrestore(&dev->lock, flags);
	}
}

static void rmnet_data_rx_tlet(unsigned long arg)
{
	struct rmnet_smd_dev *dev = (struct rmnet_smd_dev *) arg;
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	while (1) {
		if (list_empty(&dev->rx_queue)) {
			atomic_set(&dev->smd_data.rx_pkt, 0);
			break;
		}
		req = list_first_entry(&dev->rx_queue,
				struct usb_request, list);
		if (smd_write_avail(dev->smd_data.ch) < req->actual) {
			atomic_set(&dev->smd_data.rx_pkt, req->actual);
			DBG(cdev, "rmnet SMD data channel full\n");
			break;
		}

		list_del(&req->list);
		spin_unlock_irqrestore(&dev->lock, flags);
		ret = smd_write(dev->smd_data.ch, req->buf, req->actual);
		spin_lock_irqsave(&dev->lock, flags);
		if (ret != req->actual) {
			ERROR(cdev, "rmnet SMD data write failed\n");
			/* return the request to the free list; this
			 * packet is dropped
			 */
			list_add_tail(&req->list, &dev->rx_idle);
			break;
		}
		dev->dpkts_to_modem++;
		list_add_tail(&req->list, &dev->rx_idle);
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	/* We have free rx data requests. */
	rmnet_smd_start_rx(dev);
}

/* If SMD has enough room to accommodate a data rx packet,
 * write into SMD directly.  Otherwise enqueue it on rx_queue.
 * We will not write into SMD directly until rx_queue is
 * empty, to strictly preserve the ordering of requests.
 */
static void rmnet_smd_complete_epout(struct usb_ep *ep, struct usb_request *req)
{
	struct rmnet_smd_dev *dev = req->context;
	struct usb_composite_dev *cdev = dev->cdev;
	int status = req->status;
	int ret;

	switch (status) {
	case 0:
		/* normal completion */
		break;
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		spin_lock(&dev->lock);
		list_add_tail(&req->list, &dev->rx_idle);
		spin_unlock(&dev->lock);
		return;
	default:
		/* unexpected failure */
		ERROR(cdev, "RMNET %s response error %d, %d/%d\n",
			ep->name, status,
			req->actual, req->length);
		spin_lock(&dev->lock);
		list_add_tail(&req->list, &dev->rx_idle);
		spin_unlock(&dev->lock);
		return;
	}

	spin_lock(&dev->lock);
	dev->dpkts_from_host++;
	if (!atomic_read(&dev->smd_data.rx_pkt)) {
		if (smd_write_avail(dev->smd_data.ch) < req->actual) {
			atomic_set(&dev->smd_data.rx_pkt, req->actual);
			goto queue_req;
		}
		spin_unlock(&dev->lock);
		ret = smd_write(dev->smd_data.ch, req->buf, req->actual);
		/* This should never happen */
		if (ret != req->actual)
			ERROR(cdev, "rmnet data smd write failed\n");
		/* Restart Rx */
		spin_lock(&dev->lock);
		dev->dpkts_to_modem++;
		list_add_tail(&req->list, &dev->rx_idle);
		spin_unlock(&dev->lock);
		rmnet_smd_start_rx(dev);
		return;
	}
queue_req:
	list_add_tail(&req->list, &dev->rx_queue);
	spin_unlock(&dev->lock);
}

static void rmnet_smd_complete_epin(struct usb_ep *ep, struct usb_request *req)
{
	struct rmnet_smd_dev *dev = req->context;
	struct usb_composite_dev *cdev = dev->cdev;
	int status = req->status;
	int schedule = 0;

	switch (status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		spin_lock(&dev->lock);
		list_add_tail(&req->list, &dev->tx_idle);
		spin_unlock(&dev->lock);
		break;
	default:
		ERROR(cdev, "rmnet data tx ep error %d\n", status);
		/* FALLTHROUGH */
	case 0:
		spin_lock(&dev->lock);
		if (list_empty(&dev->tx_idle))
			schedule = 1;
		list_add_tail(&req->list, &dev->tx_idle);
		dev->dpkts_to_host++;
		if (schedule)
			tasklet_schedule(&dev->smd_data.tx_tlet);
		spin_unlock(&dev->lock);
		break;
	}
}

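/*
 * Runs on the driver workqueue after disable: kill the tasklets,
 * close both SMD channels and return every queued buffer to its idle
 * pool so that a later connect starts from a clean state.
 */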
static void rmnet_smd_disconnect_work(struct work_struct *w)
{
	struct qmi_buf *qmi;
	struct usb_request *req;
	struct list_head *act, *tmp;
	struct rmnet_smd_dev *dev = container_of(w, struct rmnet_smd_dev,
			disconnect_work);

	tasklet_kill(&dev->smd_ctl.rx_tlet);
	tasklet_kill(&dev->smd_ctl.tx_tlet);
	tasklet_kill(&dev->smd_data.rx_tlet);
	tasklet_kill(&dev->smd_data.tx_tlet);

	smd_close(dev->smd_ctl.ch);
	dev->smd_ctl.flags = 0;

	smd_close(dev->smd_data.ch);
	dev->smd_data.flags = 0;

	atomic_set(&dev->notify_count, 0);

	list_for_each_safe(act, tmp, &dev->rx_queue) {
		req = list_entry(act, struct usb_request, list);
		list_del(&req->list);
		list_add_tail(&req->list, &dev->rx_idle);
	}

	list_for_each_safe(act, tmp, &dev->qmi_req_q) {
		qmi = list_entry(act, struct qmi_buf, list);
		list_del(&qmi->list);
		list_add_tail(&qmi->list, &dev->qmi_req_pool);
	}

	list_for_each_safe(act, tmp, &dev->qmi_resp_q) {
		qmi = list_entry(act, struct qmi_buf, list);
		list_del(&qmi->list);
		list_add_tail(&qmi->list, &dev->qmi_resp_pool);
	}

	if (dev->is_pdrv_used) {
		platform_driver_unregister(&dev->pdrv);
		dev->is_pdrv_used = 0;
	}
}

/* SMD close may sleep, so
 * schedule a work item to close the smd channels
 */
static void rmnet_smd_disable(struct usb_function *f)
{
	struct rmnet_smd_dev *dev = container_of(f, struct rmnet_smd_dev,
			function);

	atomic_set(&dev->online, 0);

	usb_ep_fifo_flush(dev->epnotify);
	usb_ep_disable(dev->epnotify);

	usb_ep_fifo_flush(dev->epout);
	usb_ep_disable(dev->epout);

	usb_ep_fifo_flush(dev->epin);
	usb_ep_disable(dev->epin);

	/* cleanup work */
	queue_work(dev->wq, &dev->disconnect_work);
}

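/*
 * Runs on the driver workqueue: open the control and data SMD
 * channels, waiting for SMD_EVENT_OPEN on each, then mark the device
 * online and start queuing OUT requests.
 */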
static void rmnet_smd_connect_work(struct work_struct *w)
{
	struct rmnet_smd_dev *dev = container_of(w, struct rmnet_smd_dev,
			connect_work);
	struct usb_composite_dev *cdev = dev->cdev;
	int ret = 0;

	/* Control channel for QMI messages */
	ret = smd_open(rmnet_ctl_ch, &dev->smd_ctl.ch,
			&dev->smd_ctl, rmnet_smd_event_notify);
	if (ret) {
		ERROR(cdev, "Unable to open control smd channel: %d\n", ret);
		/*
		 * Register a platform driver to be notified in case the SMD
		 * channels later become ready to be opened.
		 */
		if (!dev->is_pdrv_used) {
			ret = platform_driver_register(&dev->pdrv);
			if (ret)
				ERROR(cdev, "pdrv %s register failed %d\n",
						dev->pdrv.driver.name, ret);
			else
				dev->is_pdrv_used = 1;
		}
		return;
	}
	wait_event(dev->smd_ctl.wait, test_bit(CH_OPENED,
				&dev->smd_ctl.flags));

	/* Data channel for network packets */
	ret = smd_open(rmnet_data_ch, &dev->smd_data.ch,
			&dev->smd_data, rmnet_smd_event_notify);
	if (ret) {
		ERROR(cdev, "Unable to open data smd channel\n");
		smd_close(dev->smd_ctl.ch);
		return;
	}
	wait_event(dev->smd_data.wait, test_bit(CH_OPENED,
				&dev->smd_data.flags));

	atomic_set(&dev->online, 1);
	/* Queue Rx data requests */
	rmnet_smd_start_rx(dev);
}

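/*
 * Platform-driver probe, invoked once the SMD control channel device
 * becomes available: kick the connect work to retry smd_open().
 */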
static int rmnet_smd_ch_probe(struct platform_device *pdev)
{
	DBG(rmnet_smd->cdev, "Probe called for device: %s\n", pdev->name);

	queue_work(rmnet_smd->wq, &rmnet_smd->connect_work);

	return 0;
}

/* SMD open may sleep.
 * Schedule a work item to open the smd channels and enable
 * the endpoints once the smd channels are opened successfully.
 */
static int rmnet_smd_set_alt(struct usb_function *f,
		unsigned intf, unsigned alt)
{
	struct rmnet_smd_dev *dev = container_of(f, struct rmnet_smd_dev,
			function);
	struct usb_composite_dev *cdev = dev->cdev;
	int ret = 0;

	/* Enable epin endpoint */
	ret = config_ep_by_speed(cdev->gadget, f, dev->epin);
	if (ret) {
		dev->epin->desc = NULL;
		ERROR(cdev, "config_ep_by_speed failed for ep %s, result %d\n",
				dev->epin->name, ret);
		return ret;
	}
	ret = usb_ep_enable(dev->epin);
	if (ret) {
		ERROR(cdev, "can't enable %s, result %d\n",
				dev->epin->name, ret);
		return ret;
	}

	/* Enable epout endpoint */
	ret = config_ep_by_speed(cdev->gadget, f, dev->epout);
	if (ret) {
		dev->epout->desc = NULL;
		ERROR(cdev, "config_ep_by_speed failed for ep %s, result %d\n",
				dev->epout->name, ret);
		usb_ep_disable(dev->epin);
		return ret;
	}
	ret = usb_ep_enable(dev->epout);
	if (ret) {
		ERROR(cdev, "can't enable %s, result %d\n",
				dev->epout->name, ret);
		usb_ep_disable(dev->epin);
		return ret;
	}

	/* Enable epnotify endpoint */
	ret = config_ep_by_speed(cdev->gadget, f, dev->epnotify);
	if (ret) {
		dev->epnotify->desc = NULL;
		ERROR(cdev, "config_ep_by_speed failed for ep %s, result %d\n",
				dev->epnotify->name, ret);
		usb_ep_disable(dev->epin);
		usb_ep_disable(dev->epout);
		return ret;
	}
	ret = usb_ep_enable(dev->epnotify);
	if (ret) {
		ERROR(cdev, "can't enable %s, result %d\n",
				dev->epnotify->name, ret);
		usb_ep_disable(dev->epin);
		usb_ep_disable(dev->epout);
		return ret;
	}

	queue_work(dev->wq, &dev->connect_work);
	return 0;
}

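/* Reset the packet counters and free every request and QMI buffer. */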
static void rmnet_smd_free_buf(struct rmnet_smd_dev *dev)
{
	struct qmi_buf *qmi;
	struct usb_request *req;
	struct list_head *act, *tmp;

	dev->dpkts_to_host = 0;
	dev->dpkts_from_modem = 0;
	dev->dpkts_from_host = 0;
	dev->dpkts_to_modem = 0;

	dev->cpkts_to_host = 0;
	dev->cpkts_from_modem = 0;
	dev->cpkts_from_host = 0;
	dev->cpkts_to_modem = 0;

	/* free all usb requests in tx pool (allocated on epin) */
	list_for_each_safe(act, tmp, &dev->tx_idle) {
		req = list_entry(act, struct usb_request, list);
		list_del(&req->list);
		rmnet_smd_free_req(dev->epin, req);
	}

	/* free all usb requests in rx pool (allocated on epout) */
	list_for_each_safe(act, tmp, &dev->rx_idle) {
		req = list_entry(act, struct usb_request, list);
		list_del(&req->list);
		rmnet_smd_free_req(dev->epout, req);
	}

	/* free all buffers in qmi request pool */
	list_for_each_safe(act, tmp, &dev->qmi_req_pool) {
		qmi = list_entry(act, struct qmi_buf, list);
		list_del(&qmi->list);
		rmnet_smd_free_qmi(qmi);
	}

	/* free all buffers in qmi response pool */
	list_for_each_safe(act, tmp, &dev->qmi_resp_pool) {
		qmi = list_entry(act, struct qmi_buf, list);
		list_del(&qmi->list);
		rmnet_smd_free_qmi(qmi);
	}

	rmnet_smd_free_req(dev->epnotify, dev->notify_req);
}

static int rmnet_smd_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct rmnet_smd_dev *dev = container_of(f, struct rmnet_smd_dev,
			function);
	int i, id, ret;
	struct qmi_buf *qmi;
	struct usb_request *req;
	struct usb_ep *ep;

	dev->cdev = cdev;

	/* allocate interface ID */
	id = usb_interface_id(c, f);
	if (id < 0)
		return id;
	dev->ifc_id = id;
	rmnet_smd_interface_desc.bInterfaceNumber = id;

	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_smd_fs_in_desc);
	if (!ep)
		return -ENODEV;
	ep->driver_data = cdev; /* claim endpoint */
	dev->epin = ep;

	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_smd_fs_out_desc);
	if (!ep)
		return -ENODEV;
	ep->driver_data = cdev; /* claim endpoint */
	dev->epout = ep;

	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_smd_fs_notify_desc);
	if (!ep)
		return -ENODEV;
	ep->driver_data = cdev; /* claim endpoint */
	dev->epnotify = ep;

	/* support all relevant hardware speeds... we expect that when
	 * hardware is dual speed, all bulk-capable endpoints work at
	 * both speeds
	 */
	if (gadget_is_dualspeed(c->cdev->gadget)) {
		rmnet_smd_hs_in_desc.bEndpointAddress =
			rmnet_smd_fs_in_desc.bEndpointAddress;
		rmnet_smd_hs_out_desc.bEndpointAddress =
			rmnet_smd_fs_out_desc.bEndpointAddress;
		rmnet_smd_hs_notify_desc.bEndpointAddress =
			rmnet_smd_fs_notify_desc.bEndpointAddress;
	}

	/* allocate the notification request */
	dev->notify_req = rmnet_smd_alloc_req(dev->epnotify,
			RMNET_SMD_MAX_NOTIFY_SIZE, GFP_KERNEL);
	if (IS_ERR(dev->notify_req))
		return PTR_ERR(dev->notify_req);

	dev->notify_req->complete = rmnet_smd_notify_complete;
	dev->notify_req->context = dev;
	dev->notify_req->length = RMNET_SMD_MAX_NOTIFY_SIZE;

	/* Allocate the QMI request and response buffers */
	for (i = 0; i < QMI_REQ_MAX; i++) {
		qmi = rmnet_smd_alloc_qmi(QMI_REQ_SIZE, GFP_KERNEL);
		if (IS_ERR(qmi)) {
			ret = PTR_ERR(qmi);
			goto free_buf;
		}
		list_add_tail(&qmi->list, &dev->qmi_req_pool);
	}

	for (i = 0; i < QMI_RESP_MAX; i++) {
		qmi = rmnet_smd_alloc_qmi(QMI_RESP_SIZE, GFP_KERNEL);
		if (IS_ERR(qmi)) {
			ret = PTR_ERR(qmi);
			goto free_buf;
		}
		list_add_tail(&qmi->list, &dev->qmi_resp_pool);
	}

	/* Allocate bulk in/out requests for data transfer */
	for (i = 0; i < RMNET_RX_REQ_MAX; i++) {
		req = rmnet_smd_alloc_req(dev->epout, RMNET_RX_REQ_SIZE,
				GFP_KERNEL);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			goto free_buf;
		}
		req->length = RMNET_TXN_MAX;
		req->context = dev;
		req->complete = rmnet_smd_complete_epout;
		list_add_tail(&req->list, &dev->rx_idle);
	}

	for (i = 0; i < RMNET_TX_REQ_MAX; i++) {
		req = rmnet_smd_alloc_req(dev->epin, RMNET_TX_REQ_SIZE,
				GFP_KERNEL);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			goto free_buf;
		}
		req->context = dev;
		req->complete = rmnet_smd_complete_epin;
		list_add_tail(&req->list, &dev->tx_idle);
	}

	return 0;

free_buf:
	rmnet_smd_free_buf(dev);
	dev->epout = dev->epin = dev->epnotify = NULL; /* release endpoints */
	return ret;
}

/* dent_smd is referenced from rmnet_smd_unbind() even when debugfs is
 * disabled; debugfs_remove_recursive() compiles to a no-op stub there.
 */
static struct dentry *dent_smd;

#if defined(CONFIG_DEBUG_FS)
static ssize_t rmnet_smd_debug_read_stats(struct file *file, char __user *ubuf,
		size_t count, loff_t *ppos)
{
	struct rmnet_smd_dev *dev = file->private_data;
	struct rmnet_smd_ch_info smd_ctl_info = dev->smd_ctl;
	struct rmnet_smd_ch_info smd_data_info = dev->smd_data;
	char *buf;
	unsigned long flags;
	int ret;

	buf = kzalloc(512, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	spin_lock_irqsave(&dev->lock, flags);
	ret = scnprintf(buf, 512,
			"smd_control_ch_opened: %lu\n"
			"smd_data_ch_opened: %lu\n"
			"usb online : %d\n"
			"dpkts_from_modem: %lu\n"
			"dpkts_to_host: %lu\n"
			"pending_dpkts_to_host: %lu\n"
			"dpkts_from_host: %lu\n"
			"dpkts_to_modem: %lu\n"
			"pending_dpkts_to_modem: %lu\n"
			"cpkts_from_modem: %lu\n"
			"cpkts_to_host: %lu\n"
			"pending_cpkts_to_host: %lu\n"
			"cpkts_from_host: %lu\n"
			"cpkts_to_modem: %lu\n"
			"pending_cpkts_to_modem: %lu\n"
			"smd_read_avail_ctrl: %d\n"
			"smd_write_avail_ctrl: %d\n"
			"smd_read_avail_data: %d\n"
			"smd_write_avail_data: %d\n",
			smd_ctl_info.flags, smd_data_info.flags,
			atomic_read(&dev->online),
			dev->dpkts_from_modem, dev->dpkts_to_host,
			(dev->dpkts_from_modem - dev->dpkts_to_host),
			dev->dpkts_from_host, dev->dpkts_to_modem,
			(dev->dpkts_from_host - dev->dpkts_to_modem),
			dev->cpkts_from_modem, dev->cpkts_to_host,
			(dev->cpkts_from_modem - dev->cpkts_to_host),
			dev->cpkts_from_host, dev->cpkts_to_modem,
			(dev->cpkts_from_host - dev->cpkts_to_modem),
			smd_read_avail(dev->smd_ctl.ch),
			smd_write_avail(dev->smd_ctl.ch),
			smd_read_avail(dev->smd_data.ch),
			smd_write_avail(dev->smd_data.ch));
	spin_unlock_irqrestore(&dev->lock, flags);

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, ret);

	kfree(buf);

	return ret;
}

static ssize_t rmnet_smd_debug_reset_stats(struct file *file,
		const char __user *buf,
		size_t count, loff_t *ppos)
{
	struct rmnet_smd_dev *dev = file->private_data;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	dev->dpkts_to_host = 0;
	dev->dpkts_from_modem = 0;
	dev->dpkts_from_host = 0;
	dev->dpkts_to_modem = 0;

	dev->cpkts_to_host = 0;
	dev->cpkts_from_modem = 0;
	dev->cpkts_from_host = 0;
	dev->cpkts_to_modem = 0;

	spin_unlock_irqrestore(&dev->lock, flags);

	return count;
}

static int rmnet_smd_debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;

	return 0;
}

static const struct file_operations rmnet_smd_debug_stats_ops = {
	.open = rmnet_smd_debug_open,
	.read = rmnet_smd_debug_read_stats,
	.write = rmnet_smd_debug_reset_stats,
};

static struct dentry *dent_smd_status;

static void rmnet_smd_debugfs_init(struct rmnet_smd_dev *dev)
{
	dent_smd = debugfs_create_dir("usb_rmnet_smd", NULL);
	if (IS_ERR(dent_smd))
		return;

	dent_smd_status = debugfs_create_file("status", 0444, dent_smd, dev,
			&rmnet_smd_debug_stats_ops);
	if (!dent_smd_status) {
		debugfs_remove(dent_smd);
		dent_smd = NULL;
	}
}
#else
static void rmnet_smd_debugfs_init(struct rmnet_smd_dev *dev) {}
#endif

static void
rmnet_smd_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct rmnet_smd_dev *dev = container_of(f, struct rmnet_smd_dev,
			function);

	tasklet_kill(&dev->smd_ctl.rx_tlet);
	tasklet_kill(&dev->smd_ctl.tx_tlet);
	tasklet_kill(&dev->smd_data.rx_tlet);
	tasklet_kill(&dev->smd_data.tx_tlet);

	flush_workqueue(dev->wq);
	rmnet_smd_free_buf(dev);
	dev->epout = dev->epin = dev->epnotify = NULL; /* release endpoints */

	destroy_workqueue(dev->wq);
	debugfs_remove_recursive(dent_smd);
	kfree(dev);
}

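/*
 * Entry point for composite gadget drivers: allocate the device
 * state, workqueue and tasklets, then register the RmNet function
 * with the given configuration.
 */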
int rmnet_smd_bind_config(struct usb_configuration *c)
{
	struct rmnet_smd_dev *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	rmnet_smd = dev;

	dev->wq = create_singlethread_workqueue("k_rmnet_work");
	if (!dev->wq) {
		ret = -ENOMEM;
		goto free_dev;
	}

	spin_lock_init(&dev->lock);
	atomic_set(&dev->notify_count, 0);
	atomic_set(&dev->online, 0);
	atomic_set(&dev->smd_ctl.rx_pkt, 0);
	atomic_set(&dev->smd_data.rx_pkt, 0);

	INIT_WORK(&dev->connect_work, rmnet_smd_connect_work);
	INIT_WORK(&dev->disconnect_work, rmnet_smd_disconnect_work);

	tasklet_init(&dev->smd_ctl.rx_tlet, rmnet_control_rx_tlet,
			(unsigned long) dev);
	tasklet_init(&dev->smd_ctl.tx_tlet, rmnet_control_tx_tlet,
			(unsigned long) dev);
	tasklet_init(&dev->smd_data.rx_tlet, rmnet_data_rx_tlet,
			(unsigned long) dev);
	tasklet_init(&dev->smd_data.tx_tlet, rmnet_data_tx_tlet,
			(unsigned long) dev);

	init_waitqueue_head(&dev->smd_ctl.wait);
	init_waitqueue_head(&dev->smd_data.wait);

	dev->pdrv.probe = rmnet_smd_ch_probe;
	dev->pdrv.driver.name = CONFIG_RMNET_SMD_CTL_CHANNEL;
	dev->pdrv.driver.owner = THIS_MODULE;

	INIT_LIST_HEAD(&dev->qmi_req_pool);
	INIT_LIST_HEAD(&dev->qmi_req_q);
	INIT_LIST_HEAD(&dev->qmi_resp_pool);
	INIT_LIST_HEAD(&dev->qmi_resp_q);
	INIT_LIST_HEAD(&dev->rx_idle);
	INIT_LIST_HEAD(&dev->rx_queue);
	INIT_LIST_HEAD(&dev->tx_idle);

	dev->function.name = "rmnet";
	dev->function.strings = rmnet_smd_strings;
	dev->function.descriptors = rmnet_smd_fs_function;
	dev->function.hs_descriptors = rmnet_smd_hs_function;
	dev->function.bind = rmnet_smd_bind;
	dev->function.unbind = rmnet_smd_unbind;
	dev->function.setup = rmnet_smd_setup;
	dev->function.set_alt = rmnet_smd_set_alt;
	dev->function.disable = rmnet_smd_disable;

	ret = usb_add_function(c, &dev->function);
	if (ret)
		goto free_wq;

	rmnet_smd_debugfs_init(dev);

	return 0;

free_wq:
	destroy_workqueue(dev->wq);
free_dev:
	kfree(dev);

	return ret;
}