/*
 * f_rmnet.c -- RmNet function driver
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
 * Copyright (C) 2008 Nokia Corporation
 * Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/termios.h>
#include <linux/debugfs.h>

#include <mach/msm_smd.h>
#include <linux/usb/cdc.h>
#include <linux/usb/composite.h>
#include <linux/usb/ch9.h>

#include "gadget_chips.h"

#ifndef CONFIG_MSM_SMD
#define CONFIG_RMNET_SMD_CTL_CHANNEL ""
#define CONFIG_RMNET_SMD_DATA_CHANNEL ""
#endif

static char *rmnet_ctl_ch = CONFIG_RMNET_SMD_CTL_CHANNEL;
module_param(rmnet_ctl_ch, charp, S_IRUGO);
MODULE_PARM_DESC(rmnet_ctl_ch, "RmNet control SMD channel");

static char *rmnet_data_ch = CONFIG_RMNET_SMD_DATA_CHANNEL;
module_param(rmnet_data_ch, charp, S_IRUGO);
MODULE_PARM_DESC(rmnet_data_ch, "RmNet data SMD channel");

#define RMNET_SMD_ACM_CTRL_DTR		(1 << 0)

#define RMNET_SMD_NOTIFY_INTERVAL	5
#define RMNET_SMD_MAX_NOTIFY_SIZE	sizeof(struct usb_cdc_notification)

#define QMI_REQ_MAX			4
#define QMI_REQ_SIZE			2048
#define QMI_RESP_MAX			8
#define QMI_RESP_SIZE			2048

#define RMNET_RX_REQ_MAX		8
#define RMNET_RX_REQ_SIZE		2048
#define RMNET_TX_REQ_MAX		8
#define RMNET_TX_REQ_SIZE		2048

#define RMNET_TXN_MAX			2048

/* QMI requests & responses buffer */
struct qmi_buf {
	void *buf;
	int len;
	struct list_head list;
};

/* Control & data SMD channel private data */
struct rmnet_smd_ch_info {
	struct smd_channel *ch;
	struct tasklet_struct tx_tlet;
	struct tasklet_struct rx_tlet;
#define CH_OPENED 0
	unsigned long flags;
	/* pending rx packet length */
	atomic_t rx_pkt;
	/* wait for smd open event */
	wait_queue_head_t wait;
};

struct rmnet_smd_dev {
	struct usb_function function;
	struct usb_composite_dev *cdev;

	struct usb_ep *epout;
	struct usb_ep *epin;
	struct usb_ep *epnotify;
	struct usb_request *notify_req;

	u8 ifc_id;
	/* QMI lists */
	struct list_head qmi_req_pool;
	struct list_head qmi_resp_pool;
	struct list_head qmi_req_q;
	struct list_head qmi_resp_q;
	/* Tx/Rx lists */
	struct list_head tx_idle;
	struct list_head rx_idle;
	struct list_head rx_queue;

	spinlock_t lock;
	atomic_t online;
	atomic_t notify_count;

	struct platform_driver pdrv;
	u8 is_pdrv_used;
	struct rmnet_smd_ch_info smd_ctl;
	struct rmnet_smd_ch_info smd_data;

	struct workqueue_struct *wq;
	struct work_struct connect_work;
	struct work_struct disconnect_work;

	unsigned long dpkts_to_host;
	unsigned long dpkts_from_modem;
	unsigned long dpkts_from_host;
	unsigned long dpkts_to_modem;

	unsigned long cpkts_to_host;
	unsigned long cpkts_from_modem;
	unsigned long cpkts_from_host;
	unsigned long cpkts_to_modem;
};

static struct rmnet_smd_dev *rmnet_smd;

static struct usb_interface_descriptor rmnet_smd_interface_desc = {
	.bLength =		USB_DT_INTERFACE_SIZE,
	.bDescriptorType =	USB_DT_INTERFACE,
	/* .bInterfaceNumber = DYNAMIC */
	.bNumEndpoints =	3,
	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
	.bInterfaceSubClass =	USB_CLASS_VENDOR_SPEC,
	.bInterfaceProtocol =	USB_CLASS_VENDOR_SPEC,
	/* .iInterface = DYNAMIC */
};

/* Full speed support */
static struct usb_endpoint_descriptor rmnet_smd_fs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	__constant_cpu_to_le16(
					RMNET_SMD_MAX_NOTIFY_SIZE),
	.bInterval =		1 << RMNET_SMD_NOTIFY_INTERVAL,
};

static struct usb_endpoint_descriptor rmnet_smd_fs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(64),
};

static struct usb_endpoint_descriptor rmnet_smd_fs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(64),
};

static struct usb_descriptor_header *rmnet_smd_fs_function[] = {
	(struct usb_descriptor_header *) &rmnet_smd_interface_desc,
	(struct usb_descriptor_header *) &rmnet_smd_fs_notify_desc,
	(struct usb_descriptor_header *) &rmnet_smd_fs_in_desc,
	(struct usb_descriptor_header *) &rmnet_smd_fs_out_desc,
	NULL,
};

/* High speed support */
static struct usb_endpoint_descriptor rmnet_smd_hs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	__constant_cpu_to_le16(
					RMNET_SMD_MAX_NOTIFY_SIZE),
	.bInterval =		RMNET_SMD_NOTIFY_INTERVAL + 4,
};

static struct usb_endpoint_descriptor rmnet_smd_hs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor rmnet_smd_hs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(512),
};

static struct usb_descriptor_header *rmnet_smd_hs_function[] = {
	(struct usb_descriptor_header *) &rmnet_smd_interface_desc,
	(struct usb_descriptor_header *) &rmnet_smd_hs_notify_desc,
	(struct usb_descriptor_header *) &rmnet_smd_hs_in_desc,
	(struct usb_descriptor_header *) &rmnet_smd_hs_out_desc,
	NULL,
};

/* String descriptors */

static struct usb_string rmnet_smd_string_defs[] = {
	[0].s = "QMI RmNet",
	{ } /* end of list */
};

static struct usb_gadget_strings rmnet_smd_string_table = {
	.language =	0x0409,	/* en-us */
	.strings =	rmnet_smd_string_defs,
};

static struct usb_gadget_strings *rmnet_smd_strings[] = {
	&rmnet_smd_string_table,
	NULL,
};

static struct qmi_buf *
rmnet_smd_alloc_qmi(unsigned len, gfp_t kmalloc_flags)
{
	struct qmi_buf *qmi;

	qmi = kmalloc(sizeof(struct qmi_buf), kmalloc_flags);
	if (qmi != NULL) {
		qmi->buf = kmalloc(len, kmalloc_flags);
		if (qmi->buf == NULL) {
			kfree(qmi);
			qmi = NULL;
		}
	}

	return qmi ? qmi : ERR_PTR(-ENOMEM);
}

static void rmnet_smd_free_qmi(struct qmi_buf *qmi)
{
	kfree(qmi->buf);
	kfree(qmi);
}

/*
 * Allocate a usb_request and its buffer. Returns a pointer to the
 * usb_request or an error code if there is an error.
 */
static struct usb_request *
rmnet_smd_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
{
	struct usb_request *req;

	req = usb_ep_alloc_request(ep, kmalloc_flags);

	if (req != NULL) {
		req->length = len;
		req->buf = kmalloc(len, kmalloc_flags);
		if (req->buf == NULL) {
			usb_ep_free_request(ep, req);
			req = NULL;
		}
	}

	return req ? req : ERR_PTR(-ENOMEM);
}

/*
 * Free a usb_request and its buffer.
 */
static void rmnet_smd_free_req(struct usb_ep *ep, struct usb_request *req)
{
	kfree(req->buf);
	usb_ep_free_request(ep, req);
}

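/*
 * Completion handler for the interrupt-in notify endpoint. On a
 * successful transfer, keep resending the QMI_RESPONSE_AVAILABLE
 * notification until notify_count drops to zero; on disconnect, drop
 * all pending notifications.
 */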
static void rmnet_smd_notify_complete(struct usb_ep *ep,
					struct usb_request *req)
{
	struct rmnet_smd_dev *dev = req->context;
	struct usb_composite_dev *cdev = dev->cdev;
	int status = req->status;

	switch (status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		atomic_set(&dev->notify_count, 0);
		break;
	default:
		ERROR(cdev, "rmnet notify ep error %d\n", status);
		/* FALLTHROUGH */
	case 0:
		if (ep != dev->epnotify)
			break;

		/* handle multiple pending QMI_RESPONSE_AVAILABLE
		 * notifications by resending until we're done
		 */
		if (atomic_dec_and_test(&dev->notify_count))
			break;

		status = usb_ep_queue(dev->epnotify, req, GFP_ATOMIC);
		if (status) {
			atomic_dec(&dev->notify_count);
			ERROR(cdev, "rmnet notify ep enqueue error %d\n",
					status);
		}
		break;
	}
}

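/*
 * Tell the host that a QMI response is waiting by queueing a CDC
 * RESPONSE_AVAILABLE notification on the interrupt-in endpoint. Only
 * the first caller actually queues the request; the completion handler
 * resends it for any notifications that pile up in the meantime.
 */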
static void qmi_smd_response_available(struct rmnet_smd_dev *dev)
{
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req = dev->notify_req;
	struct usb_cdc_notification *event = req->buf;
	int status;

	/* Response will be sent later */
	if (atomic_inc_return(&dev->notify_count) != 1)
		return;

	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
			| USB_RECIP_INTERFACE;
	event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
	event->wValue = cpu_to_le16(0);
	event->wIndex = cpu_to_le16(dev->ifc_id);
	event->wLength = cpu_to_le16(0);

	status = usb_ep_queue(dev->epnotify, dev->notify_req, GFP_ATOMIC);
	if (status < 0) {
		atomic_dec(&dev->notify_count);
		ERROR(cdev, "rmnet notify ep enqueue error %d\n", status);
	}
}

/* TODO
 * handle modem restart events
 */
static void rmnet_smd_event_notify(void *priv, unsigned event)
{
	struct rmnet_smd_ch_info *smd_info = priv;
	int len = atomic_read(&smd_info->rx_pkt);
	struct rmnet_smd_dev *dev =
		(struct rmnet_smd_dev *) smd_info->tx_tlet.data;

	switch (event) {
	case SMD_EVENT_DATA: {
		if (!atomic_read(&dev->online))
			break;
		if (len && (smd_write_avail(smd_info->ch) >= len))
			tasklet_schedule(&smd_info->rx_tlet);

		if (smd_read_avail(smd_info->ch))
			tasklet_schedule(&smd_info->tx_tlet);

		break;
	}
	case SMD_EVENT_OPEN:
		/* usb endpoints are not enabled until smd channels
		 * are opened. wake up the worker thread to continue
		 * connection processing
		 */
		set_bit(CH_OPENED, &smd_info->flags);
		wake_up(&smd_info->wait);
		break;
	case SMD_EVENT_CLOSE:
		/* We will never come here.
		 * Reset flags after closing the smd channel.
		 */
		clear_bit(CH_OPENED, &smd_info->flags);
		break;
	}
}

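/*
 * Control tx tasklet: drain complete QMI responses from the control SMD
 * channel into qmi_resp_q and notify the host that responses are
 * available.
 */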
static void rmnet_control_tx_tlet(unsigned long arg)
{
	struct rmnet_smd_dev *dev = (struct rmnet_smd_dev *) arg;
	struct usb_composite_dev *cdev = dev->cdev;
	struct qmi_buf *qmi_resp;
	int sz;
	unsigned long flags;

	while (1) {
		sz = smd_cur_packet_size(dev->smd_ctl.ch);
		if (sz == 0)
			break;
		if (smd_read_avail(dev->smd_ctl.ch) < sz)
			break;

		spin_lock_irqsave(&dev->lock, flags);
		if (list_empty(&dev->qmi_resp_pool)) {
			ERROR(cdev, "rmnet QMI Tx buffers full\n");
			spin_unlock_irqrestore(&dev->lock, flags);
			break;
		}
		qmi_resp = list_first_entry(&dev->qmi_resp_pool,
				struct qmi_buf, list);
		list_del(&qmi_resp->list);
		spin_unlock_irqrestore(&dev->lock, flags);

		qmi_resp->len = smd_read(dev->smd_ctl.ch, qmi_resp->buf, sz);

		spin_lock_irqsave(&dev->lock, flags);
		dev->cpkts_from_modem++;
		list_add_tail(&qmi_resp->list, &dev->qmi_resp_q);
		spin_unlock_irqrestore(&dev->lock, flags);

		qmi_smd_response_available(dev);
	}
}

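/*
 * Control rx tasklet: flush QMI requests queued on qmi_req_q into the
 * control SMD channel. If the channel is full, record the pending
 * length in rx_pkt so the SMD event callback reschedules us once space
 * frees up.
 */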
static void rmnet_control_rx_tlet(unsigned long arg)
{
	struct rmnet_smd_dev *dev = (struct rmnet_smd_dev *) arg;
	struct usb_composite_dev *cdev = dev->cdev;
	struct qmi_buf *qmi_req;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	while (1) {
		if (list_empty(&dev->qmi_req_q)) {
			atomic_set(&dev->smd_ctl.rx_pkt, 0);
			break;
		}
		qmi_req = list_first_entry(&dev->qmi_req_q,
				struct qmi_buf, list);
		if (smd_write_avail(dev->smd_ctl.ch) < qmi_req->len) {
			atomic_set(&dev->smd_ctl.rx_pkt, qmi_req->len);
			DBG(cdev, "rmnet control smd channel full\n");
			break;
		}

		list_del(&qmi_req->list);
		dev->cpkts_from_host++;
		spin_unlock_irqrestore(&dev->lock, flags);
		ret = smd_write(dev->smd_ctl.ch, qmi_req->buf, qmi_req->len);
		spin_lock_irqsave(&dev->lock, flags);
		if (ret != qmi_req->len) {
			ERROR(cdev, "rmnet control smd write failed\n");
			break;
		}
		dev->cpkts_to_modem++;
		list_add_tail(&qmi_req->list, &dev->qmi_req_pool);
	}
	spin_unlock_irqrestore(&dev->lock, flags);
}

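/*
 * ep0 completion for SEND_ENCAPSULATED_COMMAND: write the QMI request
 * straight into the control SMD channel when it has room, otherwise
 * copy it into a buffer from qmi_req_pool and park it on qmi_req_q for
 * the control rx tasklet.
 */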
static void rmnet_smd_command_complete(struct usb_ep *ep,
					struct usb_request *req)
{
	struct rmnet_smd_dev *dev = req->context;
	struct usb_composite_dev *cdev = dev->cdev;
	struct qmi_buf *qmi_req;
	int ret;

	if (req->status < 0) {
		ERROR(cdev, "rmnet command error %d\n", req->status);
		return;
	}

	spin_lock(&dev->lock);
	dev->cpkts_from_host++;
	/* no pending control rx packet */
	if (!atomic_read(&dev->smd_ctl.rx_pkt)) {
		if (smd_write_avail(dev->smd_ctl.ch) < req->actual) {
			atomic_set(&dev->smd_ctl.rx_pkt, req->actual);
			goto queue_req;
		}
		spin_unlock(&dev->lock);
		ret = smd_write(dev->smd_ctl.ch, req->buf, req->actual);
		/* This should never happen */
		if (ret != req->actual)
			ERROR(cdev, "rmnet control smd write failed\n");
		spin_lock(&dev->lock);
		dev->cpkts_to_modem++;
		spin_unlock(&dev->lock);
		return;
	}
queue_req:
	if (list_empty(&dev->qmi_req_pool)) {
		spin_unlock(&dev->lock);
		ERROR(cdev, "rmnet QMI pool is empty\n");
		return;
	}

	qmi_req = list_first_entry(&dev->qmi_req_pool, struct qmi_buf, list);
	list_del(&qmi_req->list);
	spin_unlock(&dev->lock);
	memcpy(qmi_req->buf, req->buf, req->actual);
	qmi_req->len = req->actual;
	spin_lock(&dev->lock);
	list_add_tail(&qmi_req->list, &dev->qmi_req_q);
	spin_unlock(&dev->lock);
}
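
/* ep0 completion for the GET_ENCAPSULATED_RESPONSE data stage */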
static void rmnet_txcommand_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct rmnet_smd_dev *dev = req->context;

	spin_lock(&dev->lock);
	dev->cpkts_to_host++;
	spin_unlock(&dev->lock);
}

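/*
 * Handle the class-specific control requests: SEND_ENCAPSULATED_COMMAND,
 * GET_ENCAPSULATED_RESPONSE and the ACM-style SET_CONTROL_LINE_STATE
 * workaround described below.
 */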
static int
rmnet_smd_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct rmnet_smd_dev *dev = container_of(f, struct rmnet_smd_dev,
						function);
	struct usb_composite_dev *cdev = f->config->cdev;
	struct usb_request *req = cdev->req;
	int ret = -EOPNOTSUPP;
	u16 w_index = le16_to_cpu(ctrl->wIndex);
	u16 w_value = le16_to_cpu(ctrl->wValue);
	u16 w_length = le16_to_cpu(ctrl->wLength);
	struct qmi_buf *resp;
	int schedule = 0;

	if (!atomic_read(&dev->online))
		return -ENOTCONN;

	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
		ret = w_length;
		req->complete = rmnet_smd_command_complete;
		req->context = dev;
		break;

	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
		if (w_value)
			goto invalid;
		else {
			spin_lock(&dev->lock);
			if (list_empty(&dev->qmi_resp_q)) {
				INFO(cdev, "qmi resp empty"
					" req%02x.%02x v%04x i%04x l%d\n",
					ctrl->bRequestType, ctrl->bRequest,
					w_value, w_index, w_length);
				spin_unlock(&dev->lock);
				goto invalid;
			}
			resp = list_first_entry(&dev->qmi_resp_q,
					struct qmi_buf, list);
			list_del(&resp->list);
			spin_unlock(&dev->lock);
			memcpy(req->buf, resp->buf, resp->len);
			ret = resp->len;
			spin_lock(&dev->lock);

			if (list_empty(&dev->qmi_resp_pool))
				schedule = 1;
			list_add_tail(&resp->list, &dev->qmi_resp_pool);

			if (schedule)
				tasklet_schedule(&dev->smd_ctl.tx_tlet);
			spin_unlock(&dev->lock);
			req->complete = rmnet_txcommand_complete;
			req->context = dev;
		}
		break;
	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
		/* This is a workaround for RmNet and is borrowed from the
		 * CDC/ACM standard. The host driver will issue the above ACM
		 * standard request to the RmNet interface in the following
		 * scenario: Once the network adapter is disabled from device
		 * manager, the above request will be sent from the qcusbnet
		 * host driver, with DTR being '0'. Once the network adapter is
		 * enabled from device manager (or during enumeration), the
		 * request will be sent with DTR being '1'.
		 */
		if (w_value & RMNET_SMD_ACM_CTRL_DTR)
			ret = smd_tiocmset(dev->smd_ctl.ch, TIOCM_DTR, 0);
		else
			ret = smd_tiocmset(dev->smd_ctl.ch, 0, TIOCM_DTR);

		break;
	default:

invalid:
		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	}

	/* respond with data transfer or status phase? */
	if (ret >= 0) {
		VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
		req->zero = 0;
		req->length = ret;
		ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		if (ret < 0)
			ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret);
	}

	return ret;
}

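/* (Re)queue all idle OUT requests so the host can send data packets. */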
static void rmnet_smd_start_rx(struct rmnet_smd_dev *dev)
{
	struct usb_composite_dev *cdev = dev->cdev;
	int status;
	struct usb_request *req;
	struct list_head *pool = &dev->rx_idle;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	while (!list_empty(pool)) {
		req = list_entry(pool->next, struct usb_request, list);
		list_del(&req->list);

		spin_unlock_irqrestore(&dev->lock, flags);
		status = usb_ep_queue(dev->epout, req, GFP_ATOMIC);
		spin_lock_irqsave(&dev->lock, flags);

		if (status) {
			ERROR(cdev, "rmnet data rx enqueue err %d\n", status);
			list_add_tail(&req->list, pool);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->lock, flags);
}

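/*
 * Data tx tasklet: move complete packets from the data SMD channel to
 * the host by queueing them on the bulk-in endpoint.
 */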
static void rmnet_data_tx_tlet(unsigned long arg)
{
	struct rmnet_smd_dev *dev = (struct rmnet_smd_dev *) arg;
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req;
	int status;
	int sz;
	unsigned long flags;

	while (1) {
		sz = smd_cur_packet_size(dev->smd_data.ch);
		if (sz == 0)
			break;
		if (smd_read_avail(dev->smd_data.ch) < sz)
			break;

		spin_lock_irqsave(&dev->lock, flags);
		if (list_empty(&dev->tx_idle)) {
			spin_unlock_irqrestore(&dev->lock, flags);
			DBG(cdev, "rmnet data Tx buffers full\n");
			break;
		}
		req = list_first_entry(&dev->tx_idle, struct usb_request, list);
		list_del(&req->list);
		spin_unlock_irqrestore(&dev->lock, flags);

		req->length = smd_read(dev->smd_data.ch, req->buf, sz);
		status = usb_ep_queue(dev->epin, req, GFP_ATOMIC);
		if (status) {
			ERROR(cdev, "rmnet tx data enqueue err %d\n", status);
			spin_lock_irqsave(&dev->lock, flags);
			list_add_tail(&req->list, &dev->tx_idle);
			spin_unlock_irqrestore(&dev->lock, flags);
			break;
		}
		spin_lock_irqsave(&dev->lock, flags);
		dev->dpkts_from_modem++;
		spin_unlock_irqrestore(&dev->lock, flags);
	}
}

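/*
 * Data rx tasklet: flush host packets queued on rx_queue into the data
 * SMD channel, then resubmit the freed OUT requests.
 */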
static void rmnet_data_rx_tlet(unsigned long arg)
{
	struct rmnet_smd_dev *dev = (struct rmnet_smd_dev *) arg;
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	while (1) {
		if (list_empty(&dev->rx_queue)) {
			atomic_set(&dev->smd_data.rx_pkt, 0);
			break;
		}
		req = list_first_entry(&dev->rx_queue,
				struct usb_request, list);
		if (smd_write_avail(dev->smd_data.ch) < req->actual) {
			atomic_set(&dev->smd_data.rx_pkt, req->actual);
			DBG(cdev, "rmnet SMD data channel full\n");
			break;
		}

		list_del(&req->list);
		spin_unlock_irqrestore(&dev->lock, flags);
		ret = smd_write(dev->smd_data.ch, req->buf, req->actual);
		spin_lock_irqsave(&dev->lock, flags);
		if (ret != req->actual) {
			ERROR(cdev, "rmnet SMD data write failed\n");
			break;
		}
		dev->dpkts_to_modem++;
		list_add_tail(&req->list, &dev->rx_idle);
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	/* We have free rx data requests. */
	rmnet_smd_start_rx(dev);
}

/* If SMD has enough room to accommodate a data rx packet,
 * write into SMD directly. Otherwise enqueue it on rx_queue.
 * We will not write into SMD directly until rx_queue is
 * empty, to strictly preserve the ordering of requests.
 */
static void rmnet_smd_complete_epout(struct usb_ep *ep, struct usb_request *req)
{
	struct rmnet_smd_dev *dev = req->context;
	struct usb_composite_dev *cdev = dev->cdev;
	int status = req->status;
	int ret;

	switch (status) {
	case 0:
		/* normal completion */
		break;
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		spin_lock(&dev->lock);
		list_add_tail(&req->list, &dev->rx_idle);
		spin_unlock(&dev->lock);
		return;
	default:
		/* unexpected failure */
		ERROR(cdev, "RMNET %s response error %d, %d/%d\n",
			ep->name, status,
			req->actual, req->length);
		spin_lock(&dev->lock);
		list_add_tail(&req->list, &dev->rx_idle);
		spin_unlock(&dev->lock);
		return;
	}

	spin_lock(&dev->lock);
	dev->dpkts_from_host++;
	if (!atomic_read(&dev->smd_data.rx_pkt)) {
		if (smd_write_avail(dev->smd_data.ch) < req->actual) {
			atomic_set(&dev->smd_data.rx_pkt, req->actual);
			goto queue_req;
		}
		spin_unlock(&dev->lock);
		ret = smd_write(dev->smd_data.ch, req->buf, req->actual);
		/* This should never happen */
		if (ret != req->actual)
			ERROR(cdev, "rmnet data smd write failed\n");
		/* Restart Rx */
		spin_lock(&dev->lock);
		dev->dpkts_to_modem++;
		list_add_tail(&req->list, &dev->rx_idle);
		spin_unlock(&dev->lock);
		rmnet_smd_start_rx(dev);
		return;
	}
queue_req:
	list_add_tail(&req->list, &dev->rx_queue);
	spin_unlock(&dev->lock);
}

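/*
 * Bulk-in completion: return the request to tx_idle and kick the data
 * tx tasklet if the pool was empty, as it may have stalled waiting for
 * a free request.
 */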
static void rmnet_smd_complete_epin(struct usb_ep *ep, struct usb_request *req)
{
	struct rmnet_smd_dev *dev = req->context;
	struct usb_composite_dev *cdev = dev->cdev;
	int status = req->status;
	int schedule = 0;

	switch (status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		spin_lock(&dev->lock);
		list_add_tail(&req->list, &dev->tx_idle);
		spin_unlock(&dev->lock);
		break;
	default:
		ERROR(cdev, "rmnet data tx ep error %d\n", status);
		/* FALLTHROUGH */
	case 0:
		spin_lock(&dev->lock);
		if (list_empty(&dev->tx_idle))
			schedule = 1;
		list_add_tail(&req->list, &dev->tx_idle);
		dev->dpkts_to_host++;
		if (schedule)
			tasklet_schedule(&dev->smd_data.tx_tlet);
		spin_unlock(&dev->lock);
		break;
	}
}

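/*
 * Runs on the driver workqueue because smd_close() may sleep: kill the
 * tasklets, close both SMD channels and return every queued buffer to
 * its pool.
 */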
static void rmnet_smd_disconnect_work(struct work_struct *w)
{
	struct qmi_buf *qmi;
	struct usb_request *req;
	struct list_head *act, *tmp;
	struct rmnet_smd_dev *dev = container_of(w, struct rmnet_smd_dev,
						disconnect_work);

	tasklet_kill(&dev->smd_ctl.rx_tlet);
	tasklet_kill(&dev->smd_ctl.tx_tlet);
	tasklet_kill(&dev->smd_data.rx_tlet);
	tasklet_kill(&dev->smd_data.tx_tlet);

	smd_close(dev->smd_ctl.ch);
	dev->smd_ctl.flags = 0;

	smd_close(dev->smd_data.ch);
	dev->smd_data.flags = 0;

	atomic_set(&dev->notify_count, 0);

	list_for_each_safe(act, tmp, &dev->rx_queue) {
		req = list_entry(act, struct usb_request, list);
		list_del(&req->list);
		list_add_tail(&req->list, &dev->rx_idle);
	}

	list_for_each_safe(act, tmp, &dev->qmi_req_q) {
		qmi = list_entry(act, struct qmi_buf, list);
		list_del(&qmi->list);
		list_add_tail(&qmi->list, &dev->qmi_req_pool);
	}

	list_for_each_safe(act, tmp, &dev->qmi_resp_q) {
		qmi = list_entry(act, struct qmi_buf, list);
		list_del(&qmi->list);
		list_add_tail(&qmi->list, &dev->qmi_resp_pool);
	}

	if (dev->is_pdrv_used) {
		platform_driver_unregister(&dev->pdrv);
		dev->is_pdrv_used = 0;
	}
}

/* SMD close may sleep, so
 * schedule a work item to close the smd channels
 */
static void rmnet_smd_disable(struct usb_function *f)
{
	struct rmnet_smd_dev *dev = container_of(f, struct rmnet_smd_dev,
						function);

	if (!atomic_read(&dev->online))
		return;

	atomic_set(&dev->online, 0);

	usb_ep_fifo_flush(dev->epnotify);
	usb_ep_disable(dev->epnotify);
	usb_ep_fifo_flush(dev->epout);
	usb_ep_disable(dev->epout);

	usb_ep_fifo_flush(dev->epin);
	usb_ep_disable(dev->epin);

	/* cleanup work */
	queue_work(dev->wq, &dev->disconnect_work);
}

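/*
 * Runs on the driver workqueue because smd_open() may sleep: open the
 * control and data SMD channels, wait for both to report SMD_EVENT_OPEN,
 * then mark the device online and start the data rx path.
 */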
static void rmnet_smd_connect_work(struct work_struct *w)
{
	struct rmnet_smd_dev *dev = container_of(w, struct rmnet_smd_dev,
						connect_work);
	struct usb_composite_dev *cdev = dev->cdev;
	int ret = 0;

	/* Control channel for QMI messages */
	ret = smd_open(rmnet_ctl_ch, &dev->smd_ctl.ch,
			&dev->smd_ctl, rmnet_smd_event_notify);
	if (ret) {
		ERROR(cdev, "Unable to open control smd channel: %d\n", ret);
		/*
		 * Register platform driver to be notified in case the SMD
		 * channels later become ready to be opened.
		 */
		ret = platform_driver_register(&dev->pdrv);
		if (ret)
			ERROR(cdev, "Platform driver %s register failed %d\n",
					dev->pdrv.driver.name, ret);
		else
			dev->is_pdrv_used = 1;

		return;
	}
	wait_event(dev->smd_ctl.wait, test_bit(CH_OPENED,
				&dev->smd_ctl.flags));

	/* Data channel for network packets */
	ret = smd_open(rmnet_data_ch, &dev->smd_data.ch,
			&dev->smd_data, rmnet_smd_event_notify);
	if (ret) {
		ERROR(cdev, "Unable to open data smd channel\n");
		smd_close(dev->smd_ctl.ch);
		return;
	}
	wait_event(dev->smd_data.wait, test_bit(CH_OPENED,
				&dev->smd_data.flags));

	atomic_set(&dev->online, 1);
	/* Queue Rx data requests */
	rmnet_smd_start_rx(dev);
}

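/*
 * Called once the SMD control channel platform device turns up; retry
 * the deferred connect.
 */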
static int rmnet_smd_ch_probe(struct platform_device *pdev)
{
	DBG(rmnet_smd->cdev, "Probe called for device: %s\n", pdev->name);

	queue_work(rmnet_smd->wq, &rmnet_smd->connect_work);

	return 0;
}

/* SMD open may sleep.
 * Schedule a work to open smd channels and enable
 * endpoints if smd channels are opened successfully.
 */
static int rmnet_smd_set_alt(struct usb_function *f,
			unsigned intf, unsigned alt)
{
	struct rmnet_smd_dev *dev = container_of(f, struct rmnet_smd_dev,
						function);
	struct usb_composite_dev *cdev = dev->cdev;
	int ret = 0;

	/* Enable epin endpoint */
	ret = config_ep_by_speed(cdev->gadget, f, dev->epin);
	if (ret) {
		dev->epin->desc = NULL;
		ERROR(cdev, "config_ep_by_speed failed for ep %s, result %d\n",
				dev->epin->name, ret);
		return ret;
	}
	ret = usb_ep_enable(dev->epin);
	if (ret) {
		ERROR(cdev, "can't enable %s, result %d\n",
				dev->epin->name, ret);
		return ret;
	}

	/* Enable epout endpoint */
	ret = config_ep_by_speed(cdev->gadget, f, dev->epout);
	if (ret) {
		dev->epout->desc = NULL;
		ERROR(cdev, "config_ep_by_speed failed for ep %s, result %d\n",
				dev->epout->name, ret);
		usb_ep_disable(dev->epin);
		return ret;
	}
	ret = usb_ep_enable(dev->epout);
	if (ret) {
		ERROR(cdev, "can't enable %s, result %d\n",
				dev->epout->name, ret);
		usb_ep_disable(dev->epin);
		return ret;
	}

	/* Enable epnotify endpoint */
	ret = config_ep_by_speed(cdev->gadget, f, dev->epnotify);
	if (ret) {
		dev->epnotify->desc = NULL;
		ERROR(cdev, "config_ep_by_speed failed for ep %s, result %d\n",
				dev->epnotify->name, ret);
		usb_ep_disable(dev->epin);
		usb_ep_disable(dev->epout);
		return ret;
	}
	ret = usb_ep_enable(dev->epnotify);
	if (ret) {
		ERROR(cdev, "can't enable %s, result %d\n",
				dev->epnotify->name, ret);
		usb_ep_disable(dev->epin);
		usb_ep_disable(dev->epout);
		return ret;
	}

	queue_work(dev->wq, &dev->connect_work);
	return 0;
}

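/* Reset the packet counters and free every allocated buffer pool. */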
static void rmnet_smd_free_buf(struct rmnet_smd_dev *dev)
{
	struct qmi_buf *qmi;
	struct usb_request *req;
	struct list_head *act, *tmp;

	dev->dpkts_to_host = 0;
	dev->dpkts_from_modem = 0;
	dev->dpkts_from_host = 0;
	dev->dpkts_to_modem = 0;

	dev->cpkts_to_host = 0;
	dev->cpkts_from_modem = 0;
	dev->cpkts_from_host = 0;
	dev->cpkts_to_modem = 0;
	/* free all usb requests in tx pool (allocated on epin) */
	list_for_each_safe(act, tmp, &dev->tx_idle) {
		req = list_entry(act, struct usb_request, list);
		list_del(&req->list);
		rmnet_smd_free_req(dev->epin, req);
	}

	/* free all usb requests in rx pool (allocated on epout) */
	list_for_each_safe(act, tmp, &dev->rx_idle) {
		req = list_entry(act, struct usb_request, list);
		list_del(&req->list);
		rmnet_smd_free_req(dev->epout, req);
	}

	/* free all buffers in qmi request pool */
	list_for_each_safe(act, tmp, &dev->qmi_req_pool) {
		qmi = list_entry(act, struct qmi_buf, list);
		list_del(&qmi->list);
		rmnet_smd_free_qmi(qmi);
	}

	/* free all buffers in qmi response pool */
	list_for_each_safe(act, tmp, &dev->qmi_resp_pool) {
		qmi = list_entry(act, struct qmi_buf, list);
		list_del(&qmi->list);
		rmnet_smd_free_qmi(qmi);
	}

	rmnet_smd_free_req(dev->epnotify, dev->notify_req);
}
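
/*
 * Allocate the interface id, the endpoints, the notification request
 * and the QMI/data request pools for this function.
 */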
static int rmnet_smd_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct rmnet_smd_dev *dev = container_of(f, struct rmnet_smd_dev,
						function);
	int i, id, ret;
	struct qmi_buf *qmi;
	struct usb_request *req;
	struct usb_ep *ep;

	dev->cdev = cdev;

	/* allocate interface ID */
	id = usb_interface_id(c, f);
	if (id < 0)
		return id;
	dev->ifc_id = id;
	rmnet_smd_interface_desc.bInterfaceNumber = id;

	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_smd_fs_in_desc);
	if (!ep)
		return -ENODEV;
	ep->driver_data = cdev; /* claim endpoint */
	dev->epin = ep;

	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_smd_fs_out_desc);
	if (!ep)
		return -ENODEV;
	ep->driver_data = cdev; /* claim endpoint */
	dev->epout = ep;

	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_smd_fs_notify_desc);
	if (!ep)
		return -ENODEV;
	ep->driver_data = cdev; /* claim endpoint */
	dev->epnotify = ep;

	/* support all relevant hardware speeds... we expect that when
	 * hardware is dual speed, all bulk-capable endpoints work at
	 * both speeds
	 */
	if (gadget_is_dualspeed(c->cdev->gadget)) {
		rmnet_smd_hs_in_desc.bEndpointAddress =
				rmnet_smd_fs_in_desc.bEndpointAddress;
		rmnet_smd_hs_out_desc.bEndpointAddress =
				rmnet_smd_fs_out_desc.bEndpointAddress;
		rmnet_smd_hs_notify_desc.bEndpointAddress =
				rmnet_smd_fs_notify_desc.bEndpointAddress;
	}

	/* allocate notification */
	dev->notify_req = rmnet_smd_alloc_req(dev->epnotify,
				RMNET_SMD_MAX_NOTIFY_SIZE, GFP_KERNEL);
	if (IS_ERR(dev->notify_req))
		return PTR_ERR(dev->notify_req);

	dev->notify_req->complete = rmnet_smd_notify_complete;
	dev->notify_req->context = dev;
	dev->notify_req->length = RMNET_SMD_MAX_NOTIFY_SIZE;

	/* Allocate the qmi request and response buffers */
	for (i = 0; i < QMI_REQ_MAX; i++) {
		qmi = rmnet_smd_alloc_qmi(QMI_REQ_SIZE, GFP_KERNEL);
		if (IS_ERR(qmi)) {
			ret = PTR_ERR(qmi);
			goto free_buf;
		}
		list_add_tail(&qmi->list, &dev->qmi_req_pool);
	}

	for (i = 0; i < QMI_RESP_MAX; i++) {
		qmi = rmnet_smd_alloc_qmi(QMI_RESP_SIZE, GFP_KERNEL);
		if (IS_ERR(qmi)) {
			ret = PTR_ERR(qmi);
			goto free_buf;
		}
		list_add_tail(&qmi->list, &dev->qmi_resp_pool);
	}

	/* Allocate bulk in/out requests for data transfer */
	for (i = 0; i < RMNET_RX_REQ_MAX; i++) {
		req = rmnet_smd_alloc_req(dev->epout, RMNET_RX_REQ_SIZE,
				GFP_KERNEL);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			goto free_buf;
		}
		req->length = RMNET_TXN_MAX;
		req->context = dev;
		req->complete = rmnet_smd_complete_epout;
		list_add_tail(&req->list, &dev->rx_idle);
	}

	for (i = 0; i < RMNET_TX_REQ_MAX; i++) {
		req = rmnet_smd_alloc_req(dev->epin, RMNET_TX_REQ_SIZE,
				GFP_KERNEL);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			goto free_buf;
		}
		req->context = dev;
		req->complete = rmnet_smd_complete_epin;
		list_add_tail(&req->list, &dev->tx_idle);
	}

	return 0;

free_buf:
	rmnet_smd_free_buf(dev);
	dev->epout = dev->epin = dev->epnotify = NULL; /* release endpoints */
	return ret;
}

#if defined(CONFIG_DEBUG_FS)
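/* Dump channel state and the packet counters to the debugfs file. */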
static ssize_t rmnet_smd_debug_read_stats(struct file *file, char __user *ubuf,
		size_t count, loff_t *ppos)
{
	struct rmnet_smd_dev *dev = file->private_data;
	struct rmnet_smd_ch_info smd_ctl_info = dev->smd_ctl;
	struct rmnet_smd_ch_info smd_data_info = dev->smd_data;
	char *buf;
	unsigned long flags;
	int ret;

	buf = kzalloc(sizeof(char) * 512, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	spin_lock_irqsave(&dev->lock, flags);
	ret = scnprintf(buf, 512,
			"smd_control_ch_opened: %lu\n"
			"smd_data_ch_opened: %lu\n"
			"usb online : %d\n"
			"dpkts_from_modem: %lu\n"
			"dpkts_to_host: %lu\n"
			"pending_dpkts_to_host: %lu\n"
			"dpkts_from_host: %lu\n"
			"dpkts_to_modem: %lu\n"
			"pending_dpkts_to_modem: %lu\n"
			"cpkts_from_modem: %lu\n"
			"cpkts_to_host: %lu\n"
			"pending_cpkts_to_host: %lu\n"
			"cpkts_from_host: %lu\n"
			"cpkts_to_modem: %lu\n"
			"pending_cpkts_to_modem: %lu\n"
			"smd_read_avail_ctrl: %d\n"
			"smd_write_avail_ctrl: %d\n"
			"smd_read_avail_data: %d\n"
			"smd_write_avail_data: %d\n",
			smd_ctl_info.flags, smd_data_info.flags,
			atomic_read(&dev->online),
			dev->dpkts_from_modem, dev->dpkts_to_host,
			(dev->dpkts_from_modem - dev->dpkts_to_host),
			dev->dpkts_from_host, dev->dpkts_to_modem,
			(dev->dpkts_from_host - dev->dpkts_to_modem),
			dev->cpkts_from_modem, dev->cpkts_to_host,
			(dev->cpkts_from_modem - dev->cpkts_to_host),
			dev->cpkts_from_host, dev->cpkts_to_modem,
			(dev->cpkts_from_host - dev->cpkts_to_modem),
			smd_read_avail(dev->smd_ctl.ch),
			smd_write_avail(dev->smd_ctl.ch),
			smd_read_avail(dev->smd_data.ch),
			smd_write_avail(dev->smd_data.ch));

	spin_unlock_irqrestore(&dev->lock, flags);

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, ret);

	kfree(buf);

	return ret;
}

static ssize_t rmnet_smd_debug_reset_stats(struct file *file,
		const char __user *buf,
		size_t count, loff_t *ppos)
{
	struct rmnet_smd_dev *dev = file->private_data;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	dev->dpkts_to_host = 0;
	dev->dpkts_from_modem = 0;
	dev->dpkts_from_host = 0;
	dev->dpkts_to_modem = 0;

	dev->cpkts_to_host = 0;
	dev->cpkts_from_modem = 0;
	dev->cpkts_from_host = 0;
	dev->cpkts_to_modem = 0;

	spin_unlock_irqrestore(&dev->lock, flags);

	return count;
}

static int rmnet_smd_debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;

	return 0;
}

const struct file_operations rmnet_smd_debug_stats_ops = {
	.open = rmnet_smd_debug_open,
	.read = rmnet_smd_debug_read_stats,
	.write = rmnet_smd_debug_reset_stats,
};

struct dentry *dent_smd;
struct dentry *dent_smd_status;

static void rmnet_smd_debugfs_init(struct rmnet_smd_dev *dev)
{
	dent_smd = debugfs_create_dir("usb_rmnet_smd", 0);
	if (IS_ERR(dent_smd))
		return;

	dent_smd_status = debugfs_create_file("status", 0444, dent_smd, dev,
			&rmnet_smd_debug_stats_ops);
	if (!dent_smd_status) {
		debugfs_remove(dent_smd);
		dent_smd = NULL;
		return;
	}
}
#else
static void rmnet_smd_debugfs_init(struct rmnet_smd_dev *dev) {}
#endif

static void
rmnet_smd_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct rmnet_smd_dev *dev = container_of(f, struct rmnet_smd_dev,
						function);

	tasklet_kill(&dev->smd_ctl.rx_tlet);
	tasklet_kill(&dev->smd_ctl.tx_tlet);
	tasklet_kill(&dev->smd_data.rx_tlet);
	tasklet_kill(&dev->smd_data.tx_tlet);

	flush_workqueue(dev->wq);
	rmnet_smd_free_buf(dev);
	dev->epout = dev->epin = dev->epnotify = NULL; /* release endpoints */

	destroy_workqueue(dev->wq);
	debugfs_remove_recursive(dent_smd);
	kfree(dev);
}

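/*
 * Allocate the rmnet function device with its workqueue, tasklets and
 * buffer lists, register it with the composite framework and create the
 * debugfs entries.
 */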
int rmnet_smd_bind_config(struct usb_configuration *c)
{
	struct rmnet_smd_dev *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	rmnet_smd = dev;

	dev->wq = create_singlethread_workqueue("k_rmnet_work");
	if (!dev->wq) {
		ret = -ENOMEM;
		goto free_dev;
	}

	spin_lock_init(&dev->lock);
	atomic_set(&dev->notify_count, 0);
	atomic_set(&dev->online, 0);
	atomic_set(&dev->smd_ctl.rx_pkt, 0);
	atomic_set(&dev->smd_data.rx_pkt, 0);

	INIT_WORK(&dev->connect_work, rmnet_smd_connect_work);
	INIT_WORK(&dev->disconnect_work, rmnet_smd_disconnect_work);

	tasklet_init(&dev->smd_ctl.rx_tlet, rmnet_control_rx_tlet,
			(unsigned long) dev);
	tasklet_init(&dev->smd_ctl.tx_tlet, rmnet_control_tx_tlet,
			(unsigned long) dev);
	tasklet_init(&dev->smd_data.rx_tlet, rmnet_data_rx_tlet,
			(unsigned long) dev);
	tasklet_init(&dev->smd_data.tx_tlet, rmnet_data_tx_tlet,
			(unsigned long) dev);

	init_waitqueue_head(&dev->smd_ctl.wait);
	init_waitqueue_head(&dev->smd_data.wait);

	dev->pdrv.probe = rmnet_smd_ch_probe;
	dev->pdrv.driver.name = CONFIG_RMNET_SMD_CTL_CHANNEL;
	dev->pdrv.driver.owner = THIS_MODULE;

	INIT_LIST_HEAD(&dev->qmi_req_pool);
	INIT_LIST_HEAD(&dev->qmi_req_q);
	INIT_LIST_HEAD(&dev->qmi_resp_pool);
	INIT_LIST_HEAD(&dev->qmi_resp_q);
	INIT_LIST_HEAD(&dev->rx_idle);
	INIT_LIST_HEAD(&dev->rx_queue);
	INIT_LIST_HEAD(&dev->tx_idle);

	dev->function.name = "rmnet";
	dev->function.strings = rmnet_smd_strings;
	dev->function.descriptors = rmnet_smd_fs_function;
	dev->function.hs_descriptors = rmnet_smd_hs_function;
	dev->function.bind = rmnet_smd_bind;
	dev->function.unbind = rmnet_smd_unbind;
	dev->function.setup = rmnet_smd_setup;
	dev->function.set_alt = rmnet_smd_set_alt;
	dev->function.disable = rmnet_smd_disable;

	ret = usb_add_function(c, &dev->function);
	if (ret)
		goto free_wq;

	rmnet_smd_debugfs_init(dev);

	return 0;

free_wq:
	destroy_workqueue(dev->wq);
free_dev:
	kfree(dev);

	return ret;
}