/*
 * f_rmnet.c -- RmNet function driver
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
 * Copyright (C) 2008 Nokia Corporation
 * Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/termios.h>
#include <linux/debugfs.h>

#include <mach/msm_smd.h>
#include <linux/usb/cdc.h>
#include <linux/usb/composite.h>
#include <linux/usb/ch9.h>

#include "gadget_chips.h"

#ifndef CONFIG_MSM_SMD
#define CONFIG_RMNET_SMD_CTL_CHANNEL	""
#define CONFIG_RMNET_SMD_DATA_CHANNEL	""
#endif

static char *rmnet_ctl_ch = CONFIG_RMNET_SMD_CTL_CHANNEL;
module_param(rmnet_ctl_ch, charp, S_IRUGO);
MODULE_PARM_DESC(rmnet_ctl_ch, "RmNet control SMD channel");

static char *rmnet_data_ch = CONFIG_RMNET_SMD_DATA_CHANNEL;
module_param(rmnet_data_ch, charp, S_IRUGO);
MODULE_PARM_DESC(rmnet_data_ch, "RmNet data SMD channel");

#define RMNET_SMD_ACM_CTRL_DTR		(1 << 0)

#define RMNET_SMD_NOTIFY_INTERVAL	5
#define RMNET_SMD_MAX_NOTIFY_SIZE	sizeof(struct usb_cdc_notification)

#define QMI_REQ_MAX			4
#define QMI_REQ_SIZE			2048
#define QMI_RESP_MAX			8
#define QMI_RESP_SIZE			2048

#define RMNET_RX_REQ_MAX		8
#define RMNET_RX_REQ_SIZE		2048
#define RMNET_TX_REQ_MAX		8
#define RMNET_TX_REQ_SIZE		2048

#define RMNET_TXN_MAX			2048

/* QMI request & response buffer */
struct qmi_buf {
	void *buf;
	int len;
	struct list_head list;
};

/* Control & data SMD channel private data */
struct rmnet_smd_ch_info {
	struct smd_channel *ch;
	struct tasklet_struct tx_tlet;
	struct tasklet_struct rx_tlet;
#define CH_OPENED	0
	unsigned long flags;
	/* pending rx packet length */
	atomic_t rx_pkt;
	/* wait for smd open event */
	wait_queue_head_t wait;
};

struct rmnet_smd_dev {
	struct usb_function function;
	struct usb_composite_dev *cdev;

	struct usb_ep *epout;
	struct usb_ep *epin;
	struct usb_ep *epnotify;
	struct usb_request *notify_req;

	u8 ifc_id;
	/* QMI lists */
	struct list_head qmi_req_pool;
	struct list_head qmi_resp_pool;
	struct list_head qmi_req_q;
	struct list_head qmi_resp_q;
	/* Tx/Rx lists */
	struct list_head tx_idle;
	struct list_head rx_idle;
	struct list_head rx_queue;

	spinlock_t lock;
	atomic_t online;
	atomic_t notify_count;

	struct platform_driver pdrv;
	u8 is_pdrv_used;
	struct rmnet_smd_ch_info smd_ctl;
	struct rmnet_smd_ch_info smd_data;

	struct workqueue_struct *wq;
	struct work_struct connect_work;
	struct work_struct disconnect_work;

	unsigned long dpkts_to_host;
	unsigned long dpkts_from_modem;
	unsigned long dpkts_from_host;
	unsigned long dpkts_to_modem;

	unsigned long cpkts_to_host;
	unsigned long cpkts_from_modem;
	unsigned long cpkts_from_host;
	unsigned long cpkts_to_modem;
};

static struct rmnet_smd_dev *rmnet_smd;

static struct usb_interface_descriptor rmnet_smd_interface_desc = {
	.bLength = USB_DT_INTERFACE_SIZE,
	.bDescriptorType = USB_DT_INTERFACE,
	/* .bInterfaceNumber = DYNAMIC */
	.bNumEndpoints = 3,
	.bInterfaceClass = USB_CLASS_VENDOR_SPEC,
	.bInterfaceSubClass = USB_CLASS_VENDOR_SPEC,
	.bInterfaceProtocol = USB_CLASS_VENDOR_SPEC,
	/* .iInterface = DYNAMIC */
};

/* Full speed support */
static struct usb_endpoint_descriptor rmnet_smd_fs_notify_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize = __constant_cpu_to_le16(RMNET_SMD_MAX_NOTIFY_SIZE),
	.bInterval = 1 << RMNET_SMD_NOTIFY_INTERVAL,
};

static struct usb_endpoint_descriptor rmnet_smd_fs_in_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = __constant_cpu_to_le16(64),
};

static struct usb_endpoint_descriptor rmnet_smd_fs_out_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = __constant_cpu_to_le16(64),
};

static struct usb_descriptor_header *rmnet_smd_fs_function[] = {
	(struct usb_descriptor_header *) &rmnet_smd_interface_desc,
	(struct usb_descriptor_header *) &rmnet_smd_fs_notify_desc,
	(struct usb_descriptor_header *) &rmnet_smd_fs_in_desc,
	(struct usb_descriptor_header *) &rmnet_smd_fs_out_desc,
	NULL,
};

/* High speed support */
static struct usb_endpoint_descriptor rmnet_smd_hs_notify_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize = __constant_cpu_to_le16(RMNET_SMD_MAX_NOTIFY_SIZE),
	.bInterval = RMNET_SMD_NOTIFY_INTERVAL + 4,
};

static struct usb_endpoint_descriptor rmnet_smd_hs_in_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = __constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor rmnet_smd_hs_out_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = __constant_cpu_to_le16(512),
};

static struct usb_descriptor_header *rmnet_smd_hs_function[] = {
	(struct usb_descriptor_header *) &rmnet_smd_interface_desc,
	(struct usb_descriptor_header *) &rmnet_smd_hs_notify_desc,
	(struct usb_descriptor_header *) &rmnet_smd_hs_in_desc,
	(struct usb_descriptor_header *) &rmnet_smd_hs_out_desc,
	NULL,
};

/* String descriptors */

static struct usb_string rmnet_smd_string_defs[] = {
	[0].s = "QMI RmNet",
	{  } /* end of list */
};

static struct usb_gadget_strings rmnet_smd_string_table = {
	.language = 0x0409, /* en-us */
	.strings = rmnet_smd_string_defs,
};

static struct usb_gadget_strings *rmnet_smd_strings[] = {
	&rmnet_smd_string_table,
	NULL,
};

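/*
 * Allocate a qmi_buf and its data buffer. Returns a pointer to the
 * qmi_buf or ERR_PTR(-ENOMEM) if allocation fails.
 */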
static struct qmi_buf *
rmnet_smd_alloc_qmi(unsigned len, gfp_t kmalloc_flags)
{
	struct qmi_buf *qmi;

	qmi = kmalloc(sizeof(struct qmi_buf), kmalloc_flags);
	if (qmi != NULL) {
		qmi->buf = kmalloc(len, kmalloc_flags);
		if (qmi->buf == NULL) {
			kfree(qmi);
			qmi = NULL;
		}
	}

	return qmi ? qmi : ERR_PTR(-ENOMEM);
}

static void rmnet_smd_free_qmi(struct qmi_buf *qmi)
{
	kfree(qmi->buf);
	kfree(qmi);
}

/*
 * Allocate a usb_request and its buffer. Returns a pointer to the
 * usb_request or an error code if there is an error.
 */
static struct usb_request *
rmnet_smd_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
{
	struct usb_request *req;

	req = usb_ep_alloc_request(ep, kmalloc_flags);

	if (req != NULL) {
		req->length = len;
		req->buf = kmalloc(len, kmalloc_flags);
		if (req->buf == NULL) {
			usb_ep_free_request(ep, req);
			req = NULL;
		}
	}

	return req ? req : ERR_PTR(-ENOMEM);
}

/*
 * Free a usb_request and its buffer.
 */
static void rmnet_smd_free_req(struct usb_ep *ep, struct usb_request *req)
{
	kfree(req->buf);
	usb_ep_free_request(ep, req);
}
static void rmnet_smd_notify_complete(struct usb_ep *ep,
					struct usb_request *req)
{
	struct rmnet_smd_dev *dev = req->context;
	struct usb_composite_dev *cdev = dev->cdev;
	int status = req->status;

	switch (status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		atomic_set(&dev->notify_count, 0);
		break;
	default:
		ERROR(cdev, "rmnet notify ep error %d\n", status);
		/* FALLTHROUGH */
	case 0:
		if (ep != dev->epnotify)
			break;

		/* handle multiple pending QMI_RESPONSE_AVAILABLE
		 * notifications by resending until we're done
		 */
		if (atomic_dec_and_test(&dev->notify_count))
			break;

		status = usb_ep_queue(dev->epnotify, req, GFP_ATOMIC);
		if (status) {
			atomic_dec(&dev->notify_count);
			ERROR(cdev, "rmnet notify ep enqueue error %d\n",
					status);
		}
		break;
	}
}

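/*
 * Signal the host that a QMI response is available by queuing a CDC
 * RESPONSE_AVAILABLE notification. Only the first pending response
 * queues the request here; further ones are resent from the notify
 * completion handler.
 */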
static void qmi_smd_response_available(struct rmnet_smd_dev *dev)
{
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req = dev->notify_req;
	struct usb_cdc_notification *event = req->buf;
	int status;

	/* Response will be sent later */
	if (atomic_inc_return(&dev->notify_count) != 1)
		return;

	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
			| USB_RECIP_INTERFACE;
	event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
	event->wValue = cpu_to_le16(0);
	event->wIndex = cpu_to_le16(dev->ifc_id);
	event->wLength = cpu_to_le16(0);

	status = usb_ep_queue(dev->epnotify, dev->notify_req, GFP_ATOMIC);
	if (status < 0) {
		atomic_dec(&dev->notify_count);
		ERROR(cdev, "rmnet notify ep enqueue error %d\n", status);
	}
}

/* TODO
 * handle modem restart events
 */
static void rmnet_smd_event_notify(void *priv, unsigned event)
{
	struct rmnet_smd_ch_info *smd_info = priv;
	int len = atomic_read(&smd_info->rx_pkt);
	struct rmnet_smd_dev *dev =
		(struct rmnet_smd_dev *) smd_info->tx_tlet.data;

	switch (event) {
	case SMD_EVENT_DATA: {
		if (!atomic_read(&dev->online))
			break;
		if (len && (smd_write_avail(smd_info->ch) >= len))
			tasklet_schedule(&smd_info->rx_tlet);

		if (smd_read_avail(smd_info->ch))
			tasklet_schedule(&smd_info->tx_tlet);

		break;
	}
	case SMD_EVENT_OPEN:
		/* usb endpoints are not enabled until smd channels
		 * are opened. wake up worker thread to continue
		 * connection processing
		 */
		set_bit(CH_OPENED, &smd_info->flags);
		wake_up(&smd_info->wait);
		break;
	case SMD_EVENT_CLOSE:
		/* We should never get here: the channels are closed
		 * from the disconnect work. Reset the flag anyway.
		 */
		clear_bit(CH_OPENED, &smd_info->flags);
		break;
	}
}

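/*
 * Control-channel tx tasklet: drain complete QMI responses from the
 * control SMD channel into qmi_resp_q and notify the host that a
 * response is available.
 */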
static void rmnet_control_tx_tlet(unsigned long arg)
{
	struct rmnet_smd_dev *dev = (struct rmnet_smd_dev *) arg;
	struct usb_composite_dev *cdev = dev->cdev;
	struct qmi_buf *qmi_resp;
	int sz;
	unsigned long flags;

	while (1) {
		sz = smd_cur_packet_size(dev->smd_ctl.ch);
		if (sz == 0)
			break;
		if (smd_read_avail(dev->smd_ctl.ch) < sz)
			break;

		spin_lock_irqsave(&dev->lock, flags);
		if (list_empty(&dev->qmi_resp_pool)) {
			ERROR(cdev, "rmnet QMI Tx buffers full\n");
			spin_unlock_irqrestore(&dev->lock, flags);
			break;
		}
		qmi_resp = list_first_entry(&dev->qmi_resp_pool,
				struct qmi_buf, list);
		list_del(&qmi_resp->list);
		spin_unlock_irqrestore(&dev->lock, flags);

		qmi_resp->len = smd_read(dev->smd_ctl.ch, qmi_resp->buf, sz);

		spin_lock_irqsave(&dev->lock, flags);
		dev->cpkts_from_modem++;
		list_add_tail(&qmi_resp->list, &dev->qmi_resp_q);
		spin_unlock_irqrestore(&dev->lock, flags);

		qmi_smd_response_available(dev);
	}
}

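/*
 * Control-channel rx tasklet: flush QMI requests queued on qmi_req_q
 * into the control SMD channel once write space becomes available.
 */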
static void rmnet_control_rx_tlet(unsigned long arg)
{
	struct rmnet_smd_dev *dev = (struct rmnet_smd_dev *) arg;
	struct usb_composite_dev *cdev = dev->cdev;
	struct qmi_buf *qmi_req;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	while (1) {
		if (list_empty(&dev->qmi_req_q)) {
			atomic_set(&dev->smd_ctl.rx_pkt, 0);
			break;
		}
		qmi_req = list_first_entry(&dev->qmi_req_q,
				struct qmi_buf, list);
		if (smd_write_avail(dev->smd_ctl.ch) < qmi_req->len) {
			atomic_set(&dev->smd_ctl.rx_pkt, qmi_req->len);
			DBG(cdev, "rmnet control smd channel full\n");
			break;
		}

		list_del(&qmi_req->list);
		dev->cpkts_from_host++;
		spin_unlock_irqrestore(&dev->lock, flags);
		ret = smd_write(dev->smd_ctl.ch, qmi_req->buf, qmi_req->len);
		spin_lock_irqsave(&dev->lock, flags);
		if (ret != qmi_req->len) {
			ERROR(cdev, "rmnet control smd write failed\n");
			break;
		}
		dev->cpkts_to_modem++;
		list_add_tail(&qmi_req->list, &dev->qmi_req_pool);
	}
	spin_unlock_irqrestore(&dev->lock, flags);
}

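/*
 * Completion handler for a SEND_ENCAPSULATED_COMMAND transfer on ep0:
 * write the QMI request straight into the control SMD channel, or
 * park it on qmi_req_q when the channel has no room.
 */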
static void rmnet_smd_command_complete(struct usb_ep *ep,
					struct usb_request *req)
{
	struct rmnet_smd_dev *dev = req->context;
	struct usb_composite_dev *cdev = dev->cdev;
	struct qmi_buf *qmi_req;
	int ret;

	if (req->status < 0) {
		ERROR(cdev, "rmnet command error %d\n", req->status);
		return;
	}

	spin_lock(&dev->lock);
	dev->cpkts_from_host++;
	/* no pending control rx packet */
	if (!atomic_read(&dev->smd_ctl.rx_pkt)) {
		if (smd_write_avail(dev->smd_ctl.ch) < req->actual) {
			atomic_set(&dev->smd_ctl.rx_pkt, req->actual);
			goto queue_req;
		}
		spin_unlock(&dev->lock);
		ret = smd_write(dev->smd_ctl.ch, req->buf, req->actual);
		/* This should never happen */
		if (ret != req->actual)
			ERROR(cdev, "rmnet control smd write failed\n");
		spin_lock(&dev->lock);
		dev->cpkts_to_modem++;
		spin_unlock(&dev->lock);
		return;
	}
queue_req:
	if (list_empty(&dev->qmi_req_pool)) {
		spin_unlock(&dev->lock);
		ERROR(cdev, "rmnet QMI pool is empty\n");
		return;
	}

	qmi_req = list_first_entry(&dev->qmi_req_pool, struct qmi_buf, list);
	list_del(&qmi_req->list);
	spin_unlock(&dev->lock);
	memcpy(qmi_req->buf, req->buf, req->actual);
	qmi_req->len = req->actual;
	spin_lock(&dev->lock);
	list_add_tail(&qmi_req->list, &dev->qmi_req_q);
	spin_unlock(&dev->lock);
}

static void rmnet_txcommand_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct rmnet_smd_dev *dev = req->context;

	spin_lock(&dev->lock);
	dev->cpkts_to_host++;
	spin_unlock(&dev->lock);
}

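/*
 * ep0 setup handler: carries QMI messages over the CDC encapsulated
 * command transport (USB_CDC_SEND_ENCAPSULATED_COMMAND and
 * USB_CDC_GET_ENCAPSULATED_RESPONSE) and implements an ACM-style
 * SET_CONTROL_LINE_STATE workaround to forward DTR to the modem.
 */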
static int
rmnet_smd_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct rmnet_smd_dev *dev = container_of(f, struct rmnet_smd_dev,
							function);
	struct usb_composite_dev *cdev = f->config->cdev;
	struct usb_request *req = cdev->req;
	int ret = -EOPNOTSUPP;
	u16 w_index = le16_to_cpu(ctrl->wIndex);
	u16 w_value = le16_to_cpu(ctrl->wValue);
	u16 w_length = le16_to_cpu(ctrl->wLength);
	struct qmi_buf *resp;
	int schedule = 0;

	if (!atomic_read(&dev->online))
		return -ENOTCONN;

	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
		ret = w_length;
		req->complete = rmnet_smd_command_complete;
		req->context = dev;
		break;

	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
		if (w_value)
			goto invalid;
		else {
			spin_lock(&dev->lock);
			if (list_empty(&dev->qmi_resp_q)) {
				INFO(cdev, "qmi resp empty "
					"req%02x.%02x v%04x i%04x l%d\n",
					ctrl->bRequestType, ctrl->bRequest,
					w_value, w_index, w_length);
				spin_unlock(&dev->lock);
				goto invalid;
			}
			resp = list_first_entry(&dev->qmi_resp_q,
					struct qmi_buf, list);
			list_del(&resp->list);
			spin_unlock(&dev->lock);
			memcpy(req->buf, resp->buf, resp->len);
			ret = resp->len;
			spin_lock(&dev->lock);

			if (list_empty(&dev->qmi_resp_pool))
				schedule = 1;
			list_add_tail(&resp->list, &dev->qmi_resp_pool);

			if (schedule)
				tasklet_schedule(&dev->smd_ctl.tx_tlet);
			spin_unlock(&dev->lock);
			req->complete = rmnet_txcommand_complete;
			req->context = dev;
		}
		break;
	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
		/* This is a workaround for RmNet and is borrowed from the
		 * CDC/ACM standard. The host driver will issue the above ACM
		 * standard request to the RmNet interface in the following
		 * scenario: Once the network adapter is disabled from device
		 * manager, the above request will be sent from the qcusbnet
		 * host driver, with DTR being '0'. Once network adapter is
		 * enabled from device manager (or during enumeration), the
		 * request will be sent with DTR being '1'.
		 */
		if (w_value & RMNET_SMD_ACM_CTRL_DTR)
			ret = smd_tiocmset(dev->smd_ctl.ch, TIOCM_DTR, 0);
		else
			ret = smd_tiocmset(dev->smd_ctl.ch, 0, TIOCM_DTR);

		break;
	default:

invalid:
		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	}

	/* respond with data transfer or status phase? */
	if (ret >= 0) {
		VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
		req->zero = 0;
		req->length = ret;
		ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		if (ret < 0)
			ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret);
	}

	return ret;
}

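/* Queue all idle rx requests on the bulk-out endpoint. */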
static void rmnet_smd_start_rx(struct rmnet_smd_dev *dev)
{
	struct usb_composite_dev *cdev = dev->cdev;
	int status;
	struct usb_request *req;
	struct list_head *pool = &dev->rx_idle;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	while (!list_empty(pool)) {
		req = list_entry(pool->next, struct usb_request, list);
		list_del(&req->list);

		spin_unlock_irqrestore(&dev->lock, flags);
		status = usb_ep_queue(dev->epout, req, GFP_ATOMIC);
		spin_lock_irqsave(&dev->lock, flags);

		if (status) {
			ERROR(cdev, "rmnet data rx enqueue err %d\n", status);
			list_add_tail(&req->list, pool);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->lock, flags);
}

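/*
 * Data-channel tx tasklet: read complete data packets from the data
 * SMD channel and queue them on the bulk-in endpoint for the host.
 */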
static void rmnet_data_tx_tlet(unsigned long arg)
{
	struct rmnet_smd_dev *dev = (struct rmnet_smd_dev *) arg;
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req;
	int status;
	int sz;
	unsigned long flags;

	while (1) {
		sz = smd_cur_packet_size(dev->smd_data.ch);
		if (sz == 0)
			break;
		if (smd_read_avail(dev->smd_data.ch) < sz)
			break;

		spin_lock_irqsave(&dev->lock, flags);
		if (list_empty(&dev->tx_idle)) {
			spin_unlock_irqrestore(&dev->lock, flags);
			DBG(cdev, "rmnet data Tx buffers full\n");
			break;
		}
		req = list_first_entry(&dev->tx_idle, struct usb_request, list);
		list_del(&req->list);
		spin_unlock_irqrestore(&dev->lock, flags);

		req->length = smd_read(dev->smd_data.ch, req->buf, sz);
		status = usb_ep_queue(dev->epin, req, GFP_ATOMIC);
		if (status) {
			ERROR(cdev, "rmnet tx data enqueue err %d\n", status);
			spin_lock_irqsave(&dev->lock, flags);
			list_add_tail(&req->list, &dev->tx_idle);
			spin_unlock_irqrestore(&dev->lock, flags);
			break;
		}
		spin_lock_irqsave(&dev->lock, flags);
		dev->dpkts_from_modem++;
		spin_unlock_irqrestore(&dev->lock, flags);
	}
}

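/*
 * Data-channel rx tasklet: flush OUT requests parked on rx_queue into
 * the data SMD channel once write space is available, then restart rx.
 */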
static void rmnet_data_rx_tlet(unsigned long arg)
{
	struct rmnet_smd_dev *dev = (struct rmnet_smd_dev *) arg;
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	while (1) {
		if (list_empty(&dev->rx_queue)) {
			atomic_set(&dev->smd_data.rx_pkt, 0);
			break;
		}
		req = list_first_entry(&dev->rx_queue,
				struct usb_request, list);
		if (smd_write_avail(dev->smd_data.ch) < req->actual) {
			atomic_set(&dev->smd_data.rx_pkt, req->actual);
			DBG(cdev, "rmnet SMD data channel full\n");
			break;
		}

		list_del(&req->list);
		spin_unlock_irqrestore(&dev->lock, flags);
		ret = smd_write(dev->smd_data.ch, req->buf, req->actual);
		spin_lock_irqsave(&dev->lock, flags);
		if (ret != req->actual) {
			ERROR(cdev, "rmnet SMD data write failed\n");
			break;
		}
		dev->dpkts_to_modem++;
		list_add_tail(&req->list, &dev->rx_idle);
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	/* We have free rx data requests. */
	rmnet_smd_start_rx(dev);
}

/* If SMD has enough room to accommodate a data rx packet,
 * write into SMD directly. Otherwise enqueue it on rx_queue.
 * We do not write into SMD directly until rx_queue is empty,
 * to strictly preserve the packet ordering.
 */
static void rmnet_smd_complete_epout(struct usb_ep *ep, struct usb_request *req)
{
	struct rmnet_smd_dev *dev = req->context;
	struct usb_composite_dev *cdev = dev->cdev;
	int status = req->status;
	int ret;

	switch (status) {
	case 0:
		/* normal completion */
		break;
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		spin_lock(&dev->lock);
		list_add_tail(&req->list, &dev->rx_idle);
		spin_unlock(&dev->lock);
		return;
	default:
		/* unexpected failure */
		ERROR(cdev, "RMNET %s response error %d, %d/%d\n",
			ep->name, status,
			req->actual, req->length);
		spin_lock(&dev->lock);
		list_add_tail(&req->list, &dev->rx_idle);
		spin_unlock(&dev->lock);
		return;
	}

	spin_lock(&dev->lock);
	dev->dpkts_from_host++;
	if (!atomic_read(&dev->smd_data.rx_pkt)) {
		if (smd_write_avail(dev->smd_data.ch) < req->actual) {
			atomic_set(&dev->smd_data.rx_pkt, req->actual);
			goto queue_req;
		}
		spin_unlock(&dev->lock);
		ret = smd_write(dev->smd_data.ch, req->buf, req->actual);
		/* This should never happen */
		if (ret != req->actual)
			ERROR(cdev, "rmnet data smd write failed\n");
		/* Restart Rx */
		spin_lock(&dev->lock);
		dev->dpkts_to_modem++;
		list_add_tail(&req->list, &dev->rx_idle);
		spin_unlock(&dev->lock);
		rmnet_smd_start_rx(dev);
		return;
	}
queue_req:
	list_add_tail(&req->list, &dev->rx_queue);
	spin_unlock(&dev->lock);
}

static void rmnet_smd_complete_epin(struct usb_ep *ep, struct usb_request *req)
{
	struct rmnet_smd_dev *dev = req->context;
	struct usb_composite_dev *cdev = dev->cdev;
	int status = req->status;
	int schedule = 0;

	switch (status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		spin_lock(&dev->lock);
		list_add_tail(&req->list, &dev->tx_idle);
		spin_unlock(&dev->lock);
		break;
	default:
		ERROR(cdev, "rmnet data tx ep error %d\n", status);
		/* FALLTHROUGH */
	case 0:
		spin_lock(&dev->lock);
		if (list_empty(&dev->tx_idle))
			schedule = 1;
		list_add_tail(&req->list, &dev->tx_idle);
		dev->dpkts_to_host++;
		if (schedule)
			tasklet_schedule(&dev->smd_data.tx_tlet);
		spin_unlock(&dev->lock);
		break;
	}
}

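/*
 * Disconnect worker: kill the tasklets, close both SMD channels and
 * return every outstanding buffer to its idle pool. Runs in process
 * context because smd_close() may sleep.
 */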
static void rmnet_smd_disconnect_work(struct work_struct *w)
{
	struct qmi_buf *qmi;
	struct usb_request *req;
	struct list_head *act, *tmp;
	struct rmnet_smd_dev *dev = container_of(w, struct rmnet_smd_dev,
					disconnect_work);

	tasklet_kill(&dev->smd_ctl.rx_tlet);
	tasklet_kill(&dev->smd_ctl.tx_tlet);
	tasklet_kill(&dev->smd_data.rx_tlet);
	tasklet_kill(&dev->smd_data.tx_tlet);

	smd_close(dev->smd_ctl.ch);
	dev->smd_ctl.flags = 0;

	smd_close(dev->smd_data.ch);
	dev->smd_data.flags = 0;

	atomic_set(&dev->notify_count, 0);

	list_for_each_safe(act, tmp, &dev->rx_queue) {
		req = list_entry(act, struct usb_request, list);
		list_del(&req->list);
		list_add_tail(&req->list, &dev->rx_idle);
	}

	list_for_each_safe(act, tmp, &dev->qmi_req_q) {
		qmi = list_entry(act, struct qmi_buf, list);
		list_del(&qmi->list);
		list_add_tail(&qmi->list, &dev->qmi_req_pool);
	}

	list_for_each_safe(act, tmp, &dev->qmi_resp_q) {
		qmi = list_entry(act, struct qmi_buf, list);
		list_del(&qmi->list);
		list_add_tail(&qmi->list, &dev->qmi_resp_pool);
	}

	if (dev->is_pdrv_used) {
		platform_driver_unregister(&dev->pdrv);
		dev->is_pdrv_used = 0;
	}
}

/* SMD close may sleep, so
 * schedule work to close the smd channels.
 */
static void rmnet_smd_disable(struct usb_function *f)
{
	struct rmnet_smd_dev *dev = container_of(f, struct rmnet_smd_dev,
					function);

	if (!atomic_read(&dev->online))
		return;

	atomic_set(&dev->online, 0);

	usb_ep_fifo_flush(dev->epnotify);
	usb_ep_disable(dev->epnotify);
	usb_ep_fifo_flush(dev->epout);
	usb_ep_disable(dev->epout);

	usb_ep_fifo_flush(dev->epin);
	usb_ep_disable(dev->epin);

	/* cleanup work */
	queue_work(dev->wq, &dev->disconnect_work);
}

static void rmnet_smd_connect_work(struct work_struct *w)
{
	struct rmnet_smd_dev *dev = container_of(w, struct rmnet_smd_dev,
					connect_work);
	struct usb_composite_dev *cdev = dev->cdev;
	int ret = 0;

	/* Control channel for QMI messages */
	ret = smd_open(rmnet_ctl_ch, &dev->smd_ctl.ch,
			&dev->smd_ctl, rmnet_smd_event_notify);
	if (ret) {
		ERROR(cdev, "Unable to open control smd channel: %d\n", ret);
		/*
		 * Register a platform driver to be notified in case the
		 * SMD channels later become ready to be opened.
		 */
		if (!dev->is_pdrv_used) {
			ret = platform_driver_register(&dev->pdrv);
			if (ret)
				ERROR(cdev, "pdrv %s register failed %d\n",
						dev->pdrv.driver.name, ret);
			else
				dev->is_pdrv_used = 1;
		}
		return;
	}
	wait_event(dev->smd_ctl.wait, test_bit(CH_OPENED,
				&dev->smd_ctl.flags));

	/* Data channel for network packets */
	ret = smd_open(rmnet_data_ch, &dev->smd_data.ch,
			&dev->smd_data, rmnet_smd_event_notify);
	if (ret) {
		ERROR(cdev, "Unable to open data smd channel\n");
		smd_close(dev->smd_ctl.ch);
		return;
	}
	wait_event(dev->smd_data.wait, test_bit(CH_OPENED,
				&dev->smd_data.flags));

	atomic_set(&dev->online, 1);
	/* Queue Rx data requests */
	rmnet_smd_start_rx(dev);
}

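/*
 * Platform driver probe: invoked once the SMD control channel device
 * becomes available; kicks the connect worker to retry opening the
 * channels.
 */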
static int rmnet_smd_ch_probe(struct platform_device *pdev)
{
	DBG(rmnet_smd->cdev, "Probe called for device: %s\n", pdev->name);

	queue_work(rmnet_smd->wq, &rmnet_smd->connect_work);

	return 0;
}

/* SMD open may sleep.
 * Schedule a work to open smd channels and enable
 * endpoints if smd channels are opened successfully.
 */
static int rmnet_smd_set_alt(struct usb_function *f,
			unsigned intf, unsigned alt)
{
	struct rmnet_smd_dev *dev = container_of(f, struct rmnet_smd_dev,
					function);
	struct usb_composite_dev *cdev = dev->cdev;
	int ret = 0;

	/* Enable epin endpoint */
	ret = config_ep_by_speed(cdev->gadget, f, dev->epin);
	if (ret) {
		dev->epin->desc = NULL;
		ERROR(cdev, "config_ep_by_speed failed for ep %s, result %d\n",
				dev->epin->name, ret);
		return ret;
	}
	ret = usb_ep_enable(dev->epin);
	if (ret) {
		ERROR(cdev, "can't enable %s, result %d\n",
				dev->epin->name, ret);
		return ret;
	}

	/* Enable epout endpoint */
	ret = config_ep_by_speed(cdev->gadget, f, dev->epout);
	if (ret) {
		dev->epout->desc = NULL;
		ERROR(cdev, "config_ep_by_speed failed for ep %s, result %d\n",
				dev->epout->name, ret);
		usb_ep_disable(dev->epin);
		return ret;
	}
	ret = usb_ep_enable(dev->epout);
	if (ret) {
		ERROR(cdev, "can't enable %s, result %d\n",
				dev->epout->name, ret);
		usb_ep_disable(dev->epin);
		return ret;
	}

	/* Enable epnotify endpoint */
	ret = config_ep_by_speed(cdev->gadget, f, dev->epnotify);
	if (ret) {
		dev->epnotify->desc = NULL;
		ERROR(cdev, "config_ep_by_speed failed for ep %s, result %d\n",
				dev->epnotify->name, ret);
		usb_ep_disable(dev->epin);
		usb_ep_disable(dev->epout);
		return ret;
	}
	ret = usb_ep_enable(dev->epnotify);
	if (ret) {
		ERROR(cdev, "can't enable %s, result %d\n",
				dev->epnotify->name, ret);
		usb_ep_disable(dev->epin);
		usb_ep_disable(dev->epout);
		return ret;
	}

	queue_work(dev->wq, &dev->connect_work);
	return 0;
}

static void rmnet_smd_free_buf(struct rmnet_smd_dev *dev)
{
	struct qmi_buf *qmi;
	struct usb_request *req;
	struct list_head *act, *tmp;

	dev->dpkts_to_host = 0;
	dev->dpkts_from_modem = 0;
	dev->dpkts_from_host = 0;
	dev->dpkts_to_modem = 0;

	dev->cpkts_to_host = 0;
	dev->cpkts_from_modem = 0;
	dev->cpkts_from_host = 0;
	dev->cpkts_to_modem = 0;

	/* free all usb requests in tx pool (allocated on epin) */
	list_for_each_safe(act, tmp, &dev->tx_idle) {
		req = list_entry(act, struct usb_request, list);
		list_del(&req->list);
		rmnet_smd_free_req(dev->epin, req);
	}

	/* free all usb requests in rx pool (allocated on epout) */
	list_for_each_safe(act, tmp, &dev->rx_idle) {
		req = list_entry(act, struct usb_request, list);
		list_del(&req->list);
		rmnet_smd_free_req(dev->epout, req);
	}

	/* free all buffers in qmi request pool */
	list_for_each_safe(act, tmp, &dev->qmi_req_pool) {
		qmi = list_entry(act, struct qmi_buf, list);
		list_del(&qmi->list);
		rmnet_smd_free_qmi(qmi);
	}

	/* free all buffers in qmi response pool */
	list_for_each_safe(act, tmp, &dev->qmi_resp_pool) {
		qmi = list_entry(act, struct qmi_buf, list);
		list_del(&qmi->list);
		rmnet_smd_free_qmi(qmi);
	}

	rmnet_smd_free_req(dev->epnotify, dev->notify_req);
}
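
/*
 * bind callback: claim the interface number and the three endpoints,
 * wire up the high-speed descriptors and preallocate the QMI buffers
 * and the bulk usb_requests used for data transfer.
 */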
static int rmnet_smd_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct rmnet_smd_dev *dev = container_of(f, struct rmnet_smd_dev,
					function);
	int i, id, ret;
	struct qmi_buf *qmi;
	struct usb_request *req;
	struct usb_ep *ep;

	dev->cdev = cdev;

	/* allocate interface ID */
	id = usb_interface_id(c, f);
	if (id < 0)
		return id;
	dev->ifc_id = id;
	rmnet_smd_interface_desc.bInterfaceNumber = id;

	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_smd_fs_in_desc);
	if (!ep)
		return -ENODEV;
	ep->driver_data = cdev; /* claim endpoint */
	dev->epin = ep;

	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_smd_fs_out_desc);
	if (!ep)
		return -ENODEV;
	ep->driver_data = cdev; /* claim endpoint */
	dev->epout = ep;

	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_smd_fs_notify_desc);
	if (!ep)
		return -ENODEV;
	ep->driver_data = cdev; /* claim endpoint */
	dev->epnotify = ep;

	/* support all relevant hardware speeds... we expect that when
	 * hardware is dual speed, all bulk-capable endpoints work at
	 * both speeds
	 */
	if (gadget_is_dualspeed(c->cdev->gadget)) {
		rmnet_smd_hs_in_desc.bEndpointAddress =
				rmnet_smd_fs_in_desc.bEndpointAddress;
		rmnet_smd_hs_out_desc.bEndpointAddress =
				rmnet_smd_fs_out_desc.bEndpointAddress;
		rmnet_smd_hs_notify_desc.bEndpointAddress =
				rmnet_smd_fs_notify_desc.bEndpointAddress;
	}

	/* allocate notification */
	dev->notify_req = rmnet_smd_alloc_req(dev->epnotify,
				RMNET_SMD_MAX_NOTIFY_SIZE, GFP_KERNEL);
	if (IS_ERR(dev->notify_req))
		return PTR_ERR(dev->notify_req);

	dev->notify_req->complete = rmnet_smd_notify_complete;
	dev->notify_req->context = dev;
	dev->notify_req->length = RMNET_SMD_MAX_NOTIFY_SIZE;

	/* Allocate the qmi request and response buffers */
	for (i = 0; i < QMI_REQ_MAX; i++) {
		qmi = rmnet_smd_alloc_qmi(QMI_REQ_SIZE, GFP_KERNEL);
		if (IS_ERR(qmi)) {
			ret = PTR_ERR(qmi);
			goto free_buf;
		}
		list_add_tail(&qmi->list, &dev->qmi_req_pool);
	}

	for (i = 0; i < QMI_RESP_MAX; i++) {
		qmi = rmnet_smd_alloc_qmi(QMI_RESP_SIZE, GFP_KERNEL);
		if (IS_ERR(qmi)) {
			ret = PTR_ERR(qmi);
			goto free_buf;
		}
		list_add_tail(&qmi->list, &dev->qmi_resp_pool);
	}

	/* Allocate bulk in/out requests for data transfer */
	for (i = 0; i < RMNET_RX_REQ_MAX; i++) {
		req = rmnet_smd_alloc_req(dev->epout, RMNET_RX_REQ_SIZE,
				GFP_KERNEL);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			goto free_buf;
		}
		req->length = RMNET_TXN_MAX;
		req->context = dev;
		req->complete = rmnet_smd_complete_epout;
		list_add_tail(&req->list, &dev->rx_idle);
	}

	for (i = 0; i < RMNET_TX_REQ_MAX; i++) {
		req = rmnet_smd_alloc_req(dev->epin, RMNET_TX_REQ_SIZE,
				GFP_KERNEL);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			goto free_buf;
		}
		req->context = dev;
		req->complete = rmnet_smd_complete_epin;
		list_add_tail(&req->list, &dev->tx_idle);
	}

	return 0;

free_buf:
	rmnet_smd_free_buf(dev);
	dev->epout = dev->epin = dev->epnotify = NULL; /* release endpoints */
	return ret;
}

#if defined(CONFIG_DEBUG_FS)
static ssize_t rmnet_smd_debug_read_stats(struct file *file, char __user *ubuf,
					size_t count, loff_t *ppos)
{
	struct rmnet_smd_dev *dev = file->private_data;
	struct rmnet_smd_ch_info smd_ctl_info = dev->smd_ctl;
	struct rmnet_smd_ch_info smd_data_info = dev->smd_data;
	char *buf;
	unsigned long flags;
	int ret;

	buf = kzalloc(sizeof(char) * 512, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	spin_lock_irqsave(&dev->lock, flags);
	ret = scnprintf(buf, 512,
			"smd_control_ch_opened: %lu\n"
			"smd_data_ch_opened: %lu\n"
			"usb online : %d\n"
			"dpkts_from_modem: %lu\n"
			"dpkts_to_host: %lu\n"
			"pending_dpkts_to_host: %lu\n"
			"dpkts_from_host: %lu\n"
			"dpkts_to_modem: %lu\n"
			"pending_dpkts_to_modem: %lu\n"
			"cpkts_from_modem: %lu\n"
			"cpkts_to_host: %lu\n"
			"pending_cpkts_to_host: %lu\n"
			"cpkts_from_host: %lu\n"
			"cpkts_to_modem: %lu\n"
			"pending_cpkts_to_modem: %lu\n"
			"smd_read_avail_ctrl: %d\n"
			"smd_write_avail_ctrl: %d\n"
			"smd_read_avail_data: %d\n"
			"smd_write_avail_data: %d\n",
			smd_ctl_info.flags, smd_data_info.flags,
			atomic_read(&dev->online),
			dev->dpkts_from_modem, dev->dpkts_to_host,
			(dev->dpkts_from_modem - dev->dpkts_to_host),
			dev->dpkts_from_host, dev->dpkts_to_modem,
			(dev->dpkts_from_host - dev->dpkts_to_modem),
			dev->cpkts_from_modem, dev->cpkts_to_host,
			(dev->cpkts_from_modem - dev->cpkts_to_host),
			dev->cpkts_from_host, dev->cpkts_to_modem,
			(dev->cpkts_from_host - dev->cpkts_to_modem),
			smd_read_avail(dev->smd_ctl.ch),
			smd_write_avail(dev->smd_ctl.ch),
			smd_read_avail(dev->smd_data.ch),
			smd_write_avail(dev->smd_data.ch));

	spin_unlock_irqrestore(&dev->lock, flags);

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, ret);

	kfree(buf);

	return ret;
}

static ssize_t rmnet_smd_debug_reset_stats(struct file *file,
					const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct rmnet_smd_dev *dev = file->private_data;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	dev->dpkts_to_host = 0;
	dev->dpkts_from_modem = 0;
	dev->dpkts_from_host = 0;
	dev->dpkts_to_modem = 0;

	dev->cpkts_to_host = 0;
	dev->cpkts_from_modem = 0;
	dev->cpkts_from_host = 0;
	dev->cpkts_to_modem = 0;

	spin_unlock_irqrestore(&dev->lock, flags);

	return count;
}

static int rmnet_smd_debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;

	return 0;
}

const struct file_operations rmnet_smd_debug_stats_ops = {
	.open = rmnet_smd_debug_open,
	.read = rmnet_smd_debug_read_stats,
	.write = rmnet_smd_debug_reset_stats,
};

struct dentry *dent_smd;
struct dentry *dent_smd_status;

static void rmnet_smd_debugfs_init(struct rmnet_smd_dev *dev)
{
	dent_smd = debugfs_create_dir("usb_rmnet_smd", 0);
	if (IS_ERR(dent_smd))
		return;

	dent_smd_status = debugfs_create_file("status", 0444, dent_smd, dev,
			&rmnet_smd_debug_stats_ops);

	if (!dent_smd_status) {
		debugfs_remove(dent_smd);
		dent_smd = NULL;
		return;
	}
}
#else
static void rmnet_smd_debugfs_init(struct rmnet_smd_dev *dev) {}
#endif

static void
rmnet_smd_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct rmnet_smd_dev *dev = container_of(f, struct rmnet_smd_dev,
					function);

	tasklet_kill(&dev->smd_ctl.rx_tlet);
	tasklet_kill(&dev->smd_ctl.tx_tlet);
	tasklet_kill(&dev->smd_data.rx_tlet);
	tasklet_kill(&dev->smd_data.tx_tlet);

	flush_workqueue(dev->wq);
	rmnet_smd_free_buf(dev);
	dev->epout = dev->epin = dev->epnotify = NULL; /* release endpoints */

	destroy_workqueue(dev->wq);
	debugfs_remove_recursive(dent_smd);
	kfree(dev);
}

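/*
 * Add the rmnet function to a configuration. A minimal usage sketch
 * from a composite gadget's config bind callback (the surrounding
 * composite driver is assumed and is not part of this file):
 *
 *	static int rmnet_config_bind(struct usb_configuration *c)
 *	{
 *		return rmnet_smd_bind_config(c);
 *	}
 */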
int rmnet_smd_bind_config(struct usb_configuration *c)
{
	struct rmnet_smd_dev *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	rmnet_smd = dev;

	dev->wq = create_singlethread_workqueue("k_rmnet_work");
	if (!dev->wq) {
		ret = -ENOMEM;
		goto free_dev;
	}

	spin_lock_init(&dev->lock);
	atomic_set(&dev->notify_count, 0);
	atomic_set(&dev->online, 0);
	atomic_set(&dev->smd_ctl.rx_pkt, 0);
	atomic_set(&dev->smd_data.rx_pkt, 0);

	INIT_WORK(&dev->connect_work, rmnet_smd_connect_work);
	INIT_WORK(&dev->disconnect_work, rmnet_smd_disconnect_work);

	tasklet_init(&dev->smd_ctl.rx_tlet, rmnet_control_rx_tlet,
					(unsigned long) dev);
	tasklet_init(&dev->smd_ctl.tx_tlet, rmnet_control_tx_tlet,
					(unsigned long) dev);
	tasklet_init(&dev->smd_data.rx_tlet, rmnet_data_rx_tlet,
					(unsigned long) dev);
	tasklet_init(&dev->smd_data.tx_tlet, rmnet_data_tx_tlet,
					(unsigned long) dev);

	init_waitqueue_head(&dev->smd_ctl.wait);
	init_waitqueue_head(&dev->smd_data.wait);

	dev->pdrv.probe = rmnet_smd_ch_probe;
	dev->pdrv.driver.name = CONFIG_RMNET_SMD_CTL_CHANNEL;
	dev->pdrv.driver.owner = THIS_MODULE;

	INIT_LIST_HEAD(&dev->qmi_req_pool);
	INIT_LIST_HEAD(&dev->qmi_req_q);
	INIT_LIST_HEAD(&dev->qmi_resp_pool);
	INIT_LIST_HEAD(&dev->qmi_resp_q);
	INIT_LIST_HEAD(&dev->rx_idle);
	INIT_LIST_HEAD(&dev->rx_queue);
	INIT_LIST_HEAD(&dev->tx_idle);

	dev->function.name = "rmnet";
	dev->function.strings = rmnet_smd_strings;
	dev->function.descriptors = rmnet_smd_fs_function;
	dev->function.hs_descriptors = rmnet_smd_hs_function;
	dev->function.bind = rmnet_smd_bind;
	dev->function.unbind = rmnet_smd_unbind;
	dev->function.setup = rmnet_smd_setup;
	dev->function.set_alt = rmnet_smd_set_alt;
	dev->function.disable = rmnet_smd_disable;

	ret = usb_add_function(c, &dev->function);
	if (ret)
		goto free_wq;

	rmnet_smd_debugfs_init(dev);

	return 0;

free_wq:
	destroy_workqueue(dev->wq);
free_dev:
	kfree(dev);

	return ret;
}