/*
 * f_rmnet_sdio.c -- RmNet SDIO function driver
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
 * Copyright (C) 2008 Nokia Corporation
 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>

#include <linux/usb/cdc.h>
#include <linux/usb/composite.h>
#include <linux/usb/ch9.h>
#include <linux/termios.h>
#include <linux/debugfs.h>

#include <mach/sdio_cmux.h>
#include <mach/sdio_dmux.h>

#ifdef CONFIG_RMNET_SDIO_CTL_CHANNEL
static uint32_t rmnet_sdio_ctl_ch = CONFIG_RMNET_SDIO_CTL_CHANNEL;
#else
static uint32_t rmnet_sdio_ctl_ch;
#endif
module_param(rmnet_sdio_ctl_ch, uint, S_IRUGO);
MODULE_PARM_DESC(rmnet_sdio_ctl_ch, "RmNet control SDIO channel ID");

#ifdef CONFIG_RMNET_SDIO_DATA_CHANNEL
static uint32_t rmnet_sdio_data_ch = CONFIG_RMNET_SDIO_DATA_CHANNEL;
#else
static uint32_t rmnet_sdio_data_ch;
#endif
module_param(rmnet_sdio_data_ch, uint, S_IRUGO);
MODULE_PARM_DESC(rmnet_sdio_data_ch, "RmNet data SDIO channel ID");

#define ACM_CTRL_DTR                    (1 << 0)

#define SDIO_MUX_HDR                    8
#define RMNET_SDIO_NOTIFY_INTERVAL      5
#define RMNET_SDIO_MAX_NFY_SZE          sizeof(struct usb_cdc_notification)

#define RMNET_SDIO_RX_REQ_MAX           16
#define RMNET_SDIO_RX_REQ_SIZE          2048
#define RMNET_SDIO_TX_REQ_MAX           200

#define TX_PKT_DROP_THRESHOLD           1000
#define RX_PKT_FLOW_CTRL_EN_THRESHOLD   1000
#define RX_PKT_FLOW_CTRL_DISABLE        500

unsigned int sdio_tx_pkt_drop_thld = TX_PKT_DROP_THRESHOLD;
module_param(sdio_tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR);

unsigned int sdio_rx_fctrl_en_thld = RX_PKT_FLOW_CTRL_EN_THRESHOLD;
module_param(sdio_rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);

unsigned int sdio_rx_fctrl_dis_thld = RX_PKT_FLOW_CTRL_DISABLE;
module_param(sdio_rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);

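/*
 * Flow-control policy (runtime tunable through the module parameters above):
 * downlink skbs queued for the host are dropped once tx_skb_queue grows
 * beyond sdio_tx_pkt_drop_thld; uplink OUT requests stop being re-queued
 * once dpkts_pending_atdmux reaches sdio_rx_fctrl_en_thld, and are queued
 * again only after the SDIO dmux drains it below sdio_rx_fctrl_dis_thld.
 */
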
/* QMI requests & responses buffer */
struct rmnet_sdio_qmi_buf {
        void *buf;
        int len;
        struct list_head list;
};

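/*
 * Per-function state. The QMI request/response queues, the tx/rx idle
 * request pools and the skb queues (together with their length counters)
 * are protected by 'lock'; 'online' and 'notify_count' are atomics so the
 * completion handlers can check them without taking the spinlock.
 */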
struct rmnet_sdio_dev {
        struct usb_function function;
        struct usb_composite_dev *cdev;

        struct usb_ep *epout;
        struct usb_ep *epin;
        struct usb_ep *epnotify;
        struct usb_request *notify_req;

        u8 ifc_id;
        /* QMI lists */
        struct list_head qmi_req_q;
        unsigned int qreq_q_len;
        struct list_head qmi_resp_q;
        unsigned int qresp_q_len;
        /* Tx/Rx lists */
        struct list_head tx_idle;
        unsigned int tx_idle_len;
        struct sk_buff_head tx_skb_queue;
        struct list_head rx_idle;
        unsigned int rx_idle_len;
        struct sk_buff_head rx_skb_queue;

        spinlock_t lock;
        atomic_t online;
        atomic_t notify_count;

        struct workqueue_struct *wq;
        struct work_struct disconnect_work;

        struct work_struct ctl_rx_work;
        struct work_struct data_rx_work;

        struct delayed_work sdio_open_work;
        struct work_struct sdio_close_work;
#define RMNET_SDIO_CH_OPEN      1
        unsigned long data_ch_status;
        unsigned long ctrl_ch_status;

        unsigned int dpkts_pending_atdmux;
        int cbits_to_modem;
        struct work_struct set_modem_ctl_bits_work;

        /* pkt logging: dpkt - data pkt, cpkt - control pkt */
        struct dentry *dent;
        unsigned long dpkt_tolaptop;
        unsigned long dpkt_tomodem;
        unsigned long tx_drp_cnt;
        unsigned long cpkt_tolaptop;
        unsigned long cpkt_tomodem;
};

static struct usb_interface_descriptor rmnet_sdio_interface_desc = {
        .bLength =              USB_DT_INTERFACE_SIZE,
        .bDescriptorType =      USB_DT_INTERFACE,
        /* .bInterfaceNumber = DYNAMIC */
        .bNumEndpoints =        3,
        .bInterfaceClass =      USB_CLASS_VENDOR_SPEC,
        .bInterfaceSubClass =   USB_CLASS_VENDOR_SPEC,
        .bInterfaceProtocol =   USB_CLASS_VENDOR_SPEC,
        /* .iInterface = DYNAMIC */
};

/* Full speed support */
static struct usb_endpoint_descriptor rmnet_sdio_fs_notify_desc = {
        .bLength =              USB_DT_ENDPOINT_SIZE,
        .bDescriptorType =      USB_DT_ENDPOINT,
        .bEndpointAddress =     USB_DIR_IN,
        .bmAttributes =         USB_ENDPOINT_XFER_INT,
        .wMaxPacketSize =       __constant_cpu_to_le16(RMNET_SDIO_MAX_NFY_SZE),
        .bInterval =            1 << RMNET_SDIO_NOTIFY_INTERVAL,
};

static struct usb_endpoint_descriptor rmnet_sdio_fs_in_desc = {
        .bLength =              USB_DT_ENDPOINT_SIZE,
        .bDescriptorType =      USB_DT_ENDPOINT,
        .bEndpointAddress =     USB_DIR_IN,
        .bmAttributes =         USB_ENDPOINT_XFER_BULK,
        .wMaxPacketSize =       __constant_cpu_to_le16(64),
};

static struct usb_endpoint_descriptor rmnet_sdio_fs_out_desc = {
        .bLength =              USB_DT_ENDPOINT_SIZE,
        .bDescriptorType =      USB_DT_ENDPOINT,
        .bEndpointAddress =     USB_DIR_OUT,
        .bmAttributes =         USB_ENDPOINT_XFER_BULK,
        .wMaxPacketSize =       __constant_cpu_to_le16(64),
};

static struct usb_descriptor_header *rmnet_sdio_fs_function[] = {
        (struct usb_descriptor_header *) &rmnet_sdio_interface_desc,
        (struct usb_descriptor_header *) &rmnet_sdio_fs_notify_desc,
        (struct usb_descriptor_header *) &rmnet_sdio_fs_in_desc,
        (struct usb_descriptor_header *) &rmnet_sdio_fs_out_desc,
        NULL,
};

/* High speed support */
static struct usb_endpoint_descriptor rmnet_sdio_hs_notify_desc = {
        .bLength =              USB_DT_ENDPOINT_SIZE,
        .bDescriptorType =      USB_DT_ENDPOINT,
        .bEndpointAddress =     USB_DIR_IN,
        .bmAttributes =         USB_ENDPOINT_XFER_INT,
        .wMaxPacketSize =       __constant_cpu_to_le16(RMNET_SDIO_MAX_NFY_SZE),
        .bInterval =            RMNET_SDIO_NOTIFY_INTERVAL + 4,
};

static struct usb_endpoint_descriptor rmnet_sdio_hs_in_desc = {
        .bLength =              USB_DT_ENDPOINT_SIZE,
        .bDescriptorType =      USB_DT_ENDPOINT,
        .bEndpointAddress =     USB_DIR_IN,
        .bmAttributes =         USB_ENDPOINT_XFER_BULK,
        .wMaxPacketSize =       __constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor rmnet_sdio_hs_out_desc = {
        .bLength =              USB_DT_ENDPOINT_SIZE,
        .bDescriptorType =      USB_DT_ENDPOINT,
        .bEndpointAddress =     USB_DIR_OUT,
        .bmAttributes =         USB_ENDPOINT_XFER_BULK,
        .wMaxPacketSize =       __constant_cpu_to_le16(512),
};

static struct usb_descriptor_header *rmnet_sdio_hs_function[] = {
        (struct usb_descriptor_header *) &rmnet_sdio_interface_desc,
        (struct usb_descriptor_header *) &rmnet_sdio_hs_notify_desc,
        (struct usb_descriptor_header *) &rmnet_sdio_hs_in_desc,
        (struct usb_descriptor_header *) &rmnet_sdio_hs_out_desc,
        NULL,
};

/* String descriptors */

static struct usb_string rmnet_sdio_string_defs[] = {
        [0].s = "QMI RmNet",
        { } /* end of list */
};

static struct usb_gadget_strings rmnet_sdio_string_table = {
        .language =     0x0409, /* en-us */
        .strings =      rmnet_sdio_string_defs,
};

static struct usb_gadget_strings *rmnet_sdio_strings[] = {
        &rmnet_sdio_string_table,
        NULL,
};

static struct rmnet_sdio_qmi_buf *
rmnet_sdio_alloc_qmi(unsigned len, gfp_t kmalloc_flags)
{
        struct rmnet_sdio_qmi_buf *qmi;

        qmi = kmalloc(sizeof(struct rmnet_sdio_qmi_buf), kmalloc_flags);
        if (qmi != NULL) {
                qmi->buf = kmalloc(len, kmalloc_flags);
                if (qmi->buf == NULL) {
                        kfree(qmi);
                        qmi = NULL;
                }
        }

        return qmi ? qmi : ERR_PTR(-ENOMEM);
}

static void rmnet_sdio_free_qmi(struct rmnet_sdio_qmi_buf *qmi)
{
        kfree(qmi->buf);
        kfree(qmi);
}

/*
 * Allocate a usb_request and its buffer. Returns a pointer to the
 * usb_request or a pointer with an error code if there is an error.
 */
static struct usb_request *
rmnet_sdio_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
{
        struct usb_request *req;

        req = usb_ep_alloc_request(ep, kmalloc_flags);

        if (len && req != NULL) {
                req->length = len;
                req->buf = kmalloc(len, kmalloc_flags);
                if (req->buf == NULL) {
                        usb_ep_free_request(ep, req);
                        req = NULL;
                }
        }

        return req ? req : ERR_PTR(-ENOMEM);
}

/*
 * Free a usb_request and its buffer.
 */
static void rmnet_sdio_free_req(struct usb_ep *ep, struct usb_request *req)
{
        kfree(req->buf);
        usb_ep_free_request(ep, req);
}

static void rmnet_sdio_notify_complete(struct usb_ep *ep,
                struct usb_request *req)
{
        struct rmnet_sdio_dev *dev = req->context;
        struct usb_composite_dev *cdev = dev->cdev;
        int status = req->status;

        switch (status) {
        case -ECONNRESET:
        case -ESHUTDOWN:
                /* connection gone */
                atomic_set(&dev->notify_count, 0);
                break;
        default:
                ERROR(cdev, "rmnet notifyep error %d\n", status);
                /* FALLTHROUGH */
        case 0:
                if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status))
                        return;

                /* handle multiple pending QMI_RESPONSE_AVAILABLE
                 * notifications by resending until we're done
                 */
                if (atomic_dec_and_test(&dev->notify_count))
                        break;

                status = usb_ep_queue(dev->epnotify, req, GFP_ATOMIC);
                if (status) {
                        atomic_dec(&dev->notify_count);
                        ERROR(cdev, "rmnet notify ep enq error %d\n", status);
                }
                break;
        }
}

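/*
 * Announce a pending QMI response on the interrupt endpoint. notify_count
 * tracks how many responses are outstanding; only the first increment
 * actually queues a transfer, and rmnet_sdio_notify_complete() keeps
 * re-queuing the same request until the count drains back to zero.
 */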
static void rmnet_sdio_qmi_resp_available(struct rmnet_sdio_dev *dev)
{
        struct usb_composite_dev *cdev = dev->cdev;
        struct usb_cdc_notification *event;
        int status;
        unsigned long flags;

        /* Response will be sent later */
        if (atomic_inc_return(&dev->notify_count) != 1)
                return;

        spin_lock_irqsave(&dev->lock, flags);

        if (!atomic_read(&dev->online)) {
                spin_unlock_irqrestore(&dev->lock, flags);
                return;
        }

        event = dev->notify_req->buf;

        event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
                        | USB_RECIP_INTERFACE;
        event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
        event->wValue = cpu_to_le16(0);
        event->wIndex = cpu_to_le16(dev->ifc_id);
        event->wLength = cpu_to_le16(0);
        spin_unlock_irqrestore(&dev->lock, flags);

        status = usb_ep_queue(dev->epnotify, dev->notify_req, GFP_ATOMIC);
        if (status < 0) {
                if (atomic_read(&dev->online))
                        atomic_dec(&dev->notify_count);
                ERROR(cdev, "rmnet notify ep enqueue error %d\n", status);
        }
}

#define SDIO_MAX_CTRL_PKT_SIZE  4096
static void rmnet_sdio_ctl_receive_cb(void *data, int size, void *priv)
{
        struct rmnet_sdio_dev *dev = priv;
        struct usb_composite_dev *cdev = dev->cdev;
        struct rmnet_sdio_qmi_buf *qmi_resp;
        unsigned long flags;

        if (!data) {
                pr_info("%s: cmux_ch close event\n", __func__);
                if (test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status) &&
                        test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status)) {
                        clear_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status);
                        clear_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status);
                        queue_work(dev->wq, &dev->sdio_close_work);
                }
                return;
        }

        if (!size || !test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status))
                return;

        if (size > SDIO_MAX_CTRL_PKT_SIZE) {
                ERROR(cdev, "ctrl pkt size:%d exceeds max pkt size:%d\n",
                                size, SDIO_MAX_CTRL_PKT_SIZE);
                return;
        }

        if (!atomic_read(&dev->online)) {
                DBG(cdev, "USB disconnected\n");
                return;
        }

        qmi_resp = rmnet_sdio_alloc_qmi(size, GFP_KERNEL);
        if (IS_ERR(qmi_resp)) {
                DBG(cdev, "unable to allocate memory for QMI resp\n");
                return;
        }
        memcpy(qmi_resp->buf, data, size);
        qmi_resp->len = size;
        spin_lock_irqsave(&dev->lock, flags);
        list_add_tail(&qmi_resp->list, &dev->qmi_resp_q);
        dev->qresp_q_len++;
        spin_unlock_irqrestore(&dev->lock, flags);

        rmnet_sdio_qmi_resp_available(dev);
}

static void rmnet_sdio_ctl_write_done(void *data, int size, void *priv)
{
        struct rmnet_sdio_dev *dev = priv;
        struct usb_composite_dev *cdev = dev->cdev;

        VDBG(cdev, "rmnet control write done = %d bytes\n", size);
}

static void rmnet_sdio_sts_callback(int id, void *priv)
{
        struct rmnet_sdio_dev *dev = priv;
        struct usb_composite_dev *cdev = dev->cdev;

        DBG(cdev, "rmnet_sdio_sts_callback: id: %d\n", id);
}

static void rmnet_sdio_control_rx_work(struct work_struct *w)
{
        struct rmnet_sdio_dev *dev = container_of(w, struct rmnet_sdio_dev,
                        ctl_rx_work);
        struct usb_composite_dev *cdev = dev->cdev;
        struct rmnet_sdio_qmi_buf *qmi_req;
        unsigned long flags;
        int ret;

        while (1) {
                spin_lock_irqsave(&dev->lock, flags);
                if (list_empty(&dev->qmi_req_q))
                        goto unlock;

                qmi_req = list_first_entry(&dev->qmi_req_q,
                                struct rmnet_sdio_qmi_buf, list);
                list_del(&qmi_req->list);
                dev->qreq_q_len--;
                spin_unlock_irqrestore(&dev->lock, flags);

                ret = sdio_cmux_write(rmnet_sdio_ctl_ch, qmi_req->buf,
                                qmi_req->len);
                if (ret != qmi_req->len) {
                        ERROR(cdev, "rmnet control SDIO write failed\n");
                        rmnet_sdio_free_qmi(qmi_req);
                        return;
                }

                dev->cpkt_tomodem++;

                /*
                 * cmux_write API copies the buffer and gives it to sdio_al.
                 * Hence freeing the memory before write is completed.
                 */
                rmnet_sdio_free_qmi(qmi_req);
        }
unlock:
        spin_unlock_irqrestore(&dev->lock, flags);
}

static void rmnet_sdio_response_complete(struct usb_ep *ep,
                struct usb_request *req)
{
        struct rmnet_sdio_dev *dev = req->context;
        struct usb_composite_dev *cdev = dev->cdev;

        switch (req->status) {
        case -ECONNRESET:
        case -ESHUTDOWN:
        case 0:
                return;
        default:
                INFO(cdev, "rmnet %s response error %d, %d/%d\n",
                        ep->name, req->status,
                        req->actual, req->length);
        }
}

static void rmnet_sdio_command_complete(struct usb_ep *ep,
                struct usb_request *req)
{
        struct rmnet_sdio_dev *dev = req->context;
        struct usb_composite_dev *cdev = dev->cdev;
        struct rmnet_sdio_qmi_buf *qmi_req;
        int len = req->actual;

        if (req->status < 0) {
                ERROR(cdev, "rmnet command error %d\n", req->status);
                return;
        }

        /* discard the packet if sdio is not available */
        if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status))
                return;

        qmi_req = rmnet_sdio_alloc_qmi(len, GFP_ATOMIC);
        if (IS_ERR(qmi_req)) {
                ERROR(cdev, "unable to allocate memory for QMI req\n");
                return;
        }
        memcpy(qmi_req->buf, req->buf, len);
        qmi_req->len = len;
        spin_lock(&dev->lock);
        list_add_tail(&qmi_req->list, &dev->qmi_req_q);
        dev->qreq_q_len++;
        spin_unlock(&dev->lock);
        queue_work(dev->wq, &dev->ctl_rx_work);
}

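/*
 * ep0 handler for the control protocol: QMI messages ride on CDC-style
 * SEND_ENCAPSULATED_COMMAND (host -> modem) and GET_ENCAPSULATED_RESPONSE
 * (modem -> host) requests, and the host signals DTR through the ACM
 * SET_CONTROL_LINE_STATE request.
 */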
static int
rmnet_sdio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
        struct rmnet_sdio_dev *dev = container_of(f, struct rmnet_sdio_dev,
                        function);
        struct usb_composite_dev *cdev = f->config->cdev;
        struct usb_request *req = cdev->req;
        int ret = -EOPNOTSUPP;
        u16 w_index = le16_to_cpu(ctrl->wIndex);
        u16 w_value = le16_to_cpu(ctrl->wValue);
        u16 w_length = le16_to_cpu(ctrl->wLength);
        struct rmnet_sdio_qmi_buf *resp;

        if (!atomic_read(&dev->online))
                return -ENOTCONN;

        switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {

        case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
                        | USB_CDC_SEND_ENCAPSULATED_COMMAND:
                ret = w_length;
                req->complete = rmnet_sdio_command_complete;
                req->context = dev;
                break;

        case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
                        | USB_CDC_GET_ENCAPSULATED_RESPONSE:
                if (w_value)
                        goto invalid;
                else {
                        unsigned len;

                        spin_lock(&dev->lock);

                        if (list_empty(&dev->qmi_resp_q)) {
                                INFO(cdev, "qmi resp empty"
                                        " req%02x.%02x v%04x i%04x l%d\n",
                                        ctrl->bRequestType, ctrl->bRequest,
                                        w_value, w_index, w_length);
                                spin_unlock(&dev->lock);
                                goto invalid;
                        }

                        resp = list_first_entry(&dev->qmi_resp_q,
                                struct rmnet_sdio_qmi_buf, list);
                        list_del(&resp->list);
                        dev->qresp_q_len--;
                        spin_unlock(&dev->lock);

                        len = min_t(unsigned, w_length, resp->len);
                        memcpy(req->buf, resp->buf, len);
                        ret = len;
                        req->context = dev;
                        req->complete = rmnet_sdio_response_complete;
                        rmnet_sdio_free_qmi(resp);

                        /* check if it's the right place to add */
                        dev->cpkt_tolaptop++;
                }
                break;
        case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
                        | USB_CDC_REQ_SET_CONTROL_LINE_STATE:
                /* This is a workaround for RmNet and is borrowed from the
                 * CDC/ACM standard. The host driver will issue the above ACM
                 * standard request to the RmNet interface in the following
                 * scenario: Once the network adapter is disabled from device
                 * manager, the above request will be sent from the qcusbnet
                 * host driver, with DTR being '0'. Once network adapter is
                 * enabled from device manager (or during enumeration), the
                 * request will be sent with DTR being '1'.
                 */
                if (w_value & ACM_CTRL_DTR)
                        dev->cbits_to_modem |= TIOCM_DTR;
                else
                        dev->cbits_to_modem &= ~TIOCM_DTR;
                queue_work(dev->wq, &dev->set_modem_ctl_bits_work);

                ret = 0;

                break;
        default:

invalid:
                DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
                        ctrl->bRequestType, ctrl->bRequest,
                        w_value, w_index, w_length);
        }

        /* respond with data transfer or status phase? */
        if (ret >= 0) {
                VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n",
                        ctrl->bRequestType, ctrl->bRequest,
                        w_value, w_index, w_length);
                req->zero = (ret < w_length);
                req->length = ret;
                ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
                if (ret < 0)
                        ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret);
        }

        return ret;
}

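/*
 * Attach a fresh skb to an OUT request and queue it. SDIO_MUX_HDR bytes of
 * headroom are reserved, presumably so the SDIO dmux layer can later
 * prepend its mux header to the same skb without reallocating.
 */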
static int
rmnet_sdio_rx_submit(struct rmnet_sdio_dev *dev, struct usb_request *req,
                gfp_t gfp_flags)
{
        struct sk_buff *skb;
        int retval;

        skb = alloc_skb(RMNET_SDIO_RX_REQ_SIZE + SDIO_MUX_HDR, gfp_flags);
        if (skb == NULL)
                return -ENOMEM;
        skb_reserve(skb, SDIO_MUX_HDR);

        req->buf = skb->data;
        req->length = RMNET_SDIO_RX_REQ_SIZE;
        req->context = skb;

        retval = usb_ep_queue(dev->epout, req, gfp_flags);
        if (retval)
                dev_kfree_skb_any(skb);

        return retval;
}

static void rmnet_sdio_start_rx(struct rmnet_sdio_dev *dev)
{
        struct usb_composite_dev *cdev = dev->cdev;
        int status;
        struct usb_request *req;
        unsigned long flags;

        if (!atomic_read(&dev->online)) {
                pr_err("%s: USB not connected\n", __func__);
                return;
        }

        spin_lock_irqsave(&dev->lock, flags);
        while (!list_empty(&dev->rx_idle)) {
                req = list_first_entry(&dev->rx_idle, struct usb_request, list);
                list_del(&req->list);
                dev->rx_idle_len--;

                spin_unlock_irqrestore(&dev->lock, flags);
                status = rmnet_sdio_rx_submit(dev, req, GFP_ATOMIC);
                spin_lock_irqsave(&dev->lock, flags);

                if (status) {
                        ERROR(cdev, "rmnet data rx enqueue err %d\n", status);
                        list_add_tail(&req->list, &dev->rx_idle);
                        dev->rx_idle_len++;
                        break;
                }
        }
        spin_unlock_irqrestore(&dev->lock, flags);
}

static void rmnet_sdio_start_tx(struct rmnet_sdio_dev *dev)
{
        unsigned long flags;
        int status;
        struct sk_buff *skb;
        struct usb_request *req;
        struct usb_composite_dev *cdev = dev->cdev;

        if (!atomic_read(&dev->online))
                return;

        if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status))
                return;

        spin_lock_irqsave(&dev->lock, flags);
        while (!list_empty(&dev->tx_idle)) {
                skb = __skb_dequeue(&dev->tx_skb_queue);
                if (!skb) {
                        spin_unlock_irqrestore(&dev->lock, flags);
                        return;
                }

                req = list_first_entry(&dev->tx_idle, struct usb_request, list);
                req->context = skb;
                req->buf = skb->data;
                req->length = skb->len;

                list_del(&req->list);
                dev->tx_idle_len--;
                spin_unlock(&dev->lock);
                status = usb_ep_queue(dev->epin, req, GFP_ATOMIC);
                spin_lock(&dev->lock);
                if (status) {
                        /* USB still online, queue requests back */
                        if (atomic_read(&dev->online)) {
                                ERROR(cdev, "rmnet tx data enqueue err %d\n",
                                                status);
                                list_add_tail(&req->list, &dev->tx_idle);
                                dev->tx_idle_len++;
                                __skb_queue_head(&dev->tx_skb_queue, skb);
                        } else {
                                req->buf = NULL;
                                rmnet_sdio_free_req(dev->epin, req);
                                dev_kfree_skb_any(skb);
                        }
                        break;
                }
                dev->dpkt_tolaptop++;
        }
        spin_unlock_irqrestore(&dev->lock, flags);
}

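/*
 * Downlink path: the SDIO dmux hands us one skb per packet; it is queued
 * on tx_skb_queue (and dropped beyond sdio_tx_pkt_drop_thld), then written
 * to the host over the bulk-IN endpoint by rmnet_sdio_start_tx().
 */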
static void rmnet_sdio_data_receive_cb(void *priv, struct sk_buff *skb)
{
        struct rmnet_sdio_dev *dev = priv;
        unsigned long flags;

        /* SDIO mux sends NULL SKB when link state changes */
        if (!skb) {
                pr_info("%s: dmux_ch close event\n", __func__);
                if (test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status) &&
                        test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status)) {
                        clear_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status);
                        clear_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status);
                        queue_work(dev->wq, &dev->sdio_close_work);
                }
                return;
        }

        if (!atomic_read(&dev->online)) {
                dev_kfree_skb_any(skb);
                return;
        }

        spin_lock_irqsave(&dev->lock, flags);

        if (dev->tx_skb_queue.qlen > sdio_tx_pkt_drop_thld) {
                if (printk_ratelimit())
                        pr_err("%s: tx pkt dropped: tx_drop_cnt:%lu\n",
                                __func__, dev->tx_drp_cnt);
                dev->tx_drp_cnt++;
                spin_unlock_irqrestore(&dev->lock, flags);
                dev_kfree_skb_any(skb);
                return;
        }

        __skb_queue_tail(&dev->tx_skb_queue, skb);
        spin_unlock_irqrestore(&dev->lock, flags);

        rmnet_sdio_start_tx(dev);
}

static void rmnet_sdio_data_write_done(void *priv, struct sk_buff *skb)
{
        struct rmnet_sdio_dev *dev = priv;

        /* SDIO mux sends NULL SKB when link state changes */
        if (!skb) {
                pr_info("%s: dmux_ch open event\n", __func__);
                queue_delayed_work(dev->wq, &dev->sdio_open_work, 0);
                return;
        }

        dev_kfree_skb_any(skb);
        /* this function is called from
         * sdio mux from spin_lock_irqsave
         */
        spin_lock(&dev->lock);
        dev->dpkts_pending_atdmux--;

        if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status) ||
                        dev->dpkts_pending_atdmux >= sdio_rx_fctrl_dis_thld) {
                spin_unlock(&dev->lock);
                return;
        }
        spin_unlock(&dev->lock);

        rmnet_sdio_start_rx(dev);
}

static void rmnet_sdio_data_rx_work(struct work_struct *w)
{
        struct rmnet_sdio_dev *dev = container_of(w, struct rmnet_sdio_dev,
                        data_rx_work);
        struct usb_composite_dev *cdev = dev->cdev;
        struct sk_buff *skb;
        int ret;
        unsigned long flags;

        if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status)) {
                pr_info("%s: sdio data ch not open\n", __func__);
                return;
        }

        spin_lock_irqsave(&dev->lock, flags);
        while ((skb = __skb_dequeue(&dev->rx_skb_queue))) {
                spin_unlock_irqrestore(&dev->lock, flags);
                ret = msm_sdio_dmux_write(rmnet_sdio_data_ch, skb);
                spin_lock_irqsave(&dev->lock, flags);
                if (ret < 0) {
                        ERROR(cdev, "rmnet SDIO data write failed\n");
                        dev_kfree_skb_any(skb);
                        break;
                } else {
                        dev->dpkt_tomodem++;
                        dev->dpkts_pending_atdmux++;
                }
        }
        spin_unlock_irqrestore(&dev->lock, flags);
}

static void rmnet_sdio_complete_epout(struct usb_ep *ep,
                struct usb_request *req)
{
        struct rmnet_sdio_dev *dev = ep->driver_data;
        struct usb_composite_dev *cdev = dev->cdev;
        struct sk_buff *skb = req->context;
        int status = req->status;
        int queue = 0;

        switch (status) {
        case 0:
                /* successful completion */
                skb_put(skb, req->actual);
                queue = 1;
                break;
        case -ECONNRESET:
        case -ESHUTDOWN:
                /* connection gone */
                dev_kfree_skb_any(skb);
                req->buf = NULL;
                rmnet_sdio_free_req(ep, req);
                return;
        default:
                /* unexpected failure */
                ERROR(cdev, "RMNET %s response error %d, %d/%d\n",
                        ep->name, status,
                        req->actual, req->length);
                dev_kfree_skb_any(skb);
                skb = NULL;
                break;
        }

        if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status)) {
                pr_info("%s: sdio data ch not open\n", __func__);
                if (skb)
                        dev_kfree_skb_any(skb);
                req->buf = NULL;
                rmnet_sdio_free_req(ep, req);
                return;
        }

        spin_lock(&dev->lock);
        if (queue) {
                __skb_queue_tail(&dev->rx_skb_queue, skb);
                queue_work(dev->wq, &dev->data_rx_work);
        }

        if (dev->dpkts_pending_atdmux >= sdio_rx_fctrl_en_thld) {
                list_add_tail(&req->list, &dev->rx_idle);
                dev->rx_idle_len++;
                spin_unlock(&dev->lock);
                return;
        }
        spin_unlock(&dev->lock);

        status = rmnet_sdio_rx_submit(dev, req, GFP_ATOMIC);
        if (status) {
                ERROR(cdev, "rmnet data rx enqueue err %d\n", status);
                spin_lock(&dev->lock);
                list_add_tail(&req->list, &dev->rx_idle);
                dev->rx_idle_len++;
                spin_unlock(&dev->lock);
        }
}

static void rmnet_sdio_complete_epin(struct usb_ep *ep, struct usb_request *req)
{
        struct rmnet_sdio_dev *dev = ep->driver_data;
        struct sk_buff *skb = req->context;
        struct usb_composite_dev *cdev = dev->cdev;
        int status = req->status;

        switch (status) {
        case 0:
                /* successful completion */
        case -ECONNRESET:
        case -ESHUTDOWN:
                /* connection gone */
                break;
        default:
                ERROR(cdev, "rmnet data tx ep error %d\n", status);
                break;
        }

        spin_lock(&dev->lock);
        list_add_tail(&req->list, &dev->tx_idle);
        dev->tx_idle_len++;
        spin_unlock(&dev->lock);
        dev_kfree_skb_any(skb);

        rmnet_sdio_start_tx(dev);
}

static void rmnet_sdio_free_buf(struct rmnet_sdio_dev *dev)
{
        struct rmnet_sdio_qmi_buf *qmi;
        struct usb_request *req;
        struct list_head *act, *tmp;
        struct sk_buff *skb;
        unsigned long flags;

        spin_lock_irqsave(&dev->lock, flags);

        dev->dpkt_tolaptop = 0;
        dev->dpkt_tomodem = 0;
        dev->cpkt_tolaptop = 0;
        dev->cpkt_tomodem = 0;
        dev->dpkts_pending_atdmux = 0;
        dev->tx_drp_cnt = 0;

        /* free all usb requests in tx pool */
        list_for_each_safe(act, tmp, &dev->tx_idle) {
                req = list_entry(act, struct usb_request, list);
                list_del(&req->list);
                dev->tx_idle_len--;
                req->buf = NULL;
                rmnet_sdio_free_req(dev->epin, req);
        }

        /* free all usb requests in rx pool */
        list_for_each_safe(act, tmp, &dev->rx_idle) {
                req = list_entry(act, struct usb_request, list);
                list_del(&req->list);
                dev->rx_idle_len--;
                req->buf = NULL;
                rmnet_sdio_free_req(dev->epout, req);
        }

        /* free all buffers in qmi request pool */
        list_for_each_safe(act, tmp, &dev->qmi_req_q) {
                qmi = list_entry(act, struct rmnet_sdio_qmi_buf, list);
                list_del(&qmi->list);
                dev->qreq_q_len--;
                rmnet_sdio_free_qmi(qmi);
        }

        /* free all buffers in qmi response pool */
        list_for_each_safe(act, tmp, &dev->qmi_resp_q) {
                qmi = list_entry(act, struct rmnet_sdio_qmi_buf, list);
                list_del(&qmi->list);
                dev->qresp_q_len--;
                rmnet_sdio_free_qmi(qmi);
        }

        while ((skb = __skb_dequeue(&dev->tx_skb_queue)))
                dev_kfree_skb_any(skb);

        while ((skb = __skb_dequeue(&dev->rx_skb_queue)))
                dev_kfree_skb_any(skb);

        rmnet_sdio_free_req(dev->epnotify, dev->notify_req);

        spin_unlock_irqrestore(&dev->lock, flags);
}

static void rmnet_sdio_set_modem_cbits_w(struct work_struct *w)
{
        struct rmnet_sdio_dev *dev;

        dev = container_of(w, struct rmnet_sdio_dev, set_modem_ctl_bits_work);

        if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status))
                return;

        pr_debug("%s: cbits_to_modem:%d\n",
                        __func__, dev->cbits_to_modem);

        sdio_cmux_tiocmset(rmnet_sdio_ctl_ch,
                        dev->cbits_to_modem,
                        ~dev->cbits_to_modem);
}

static void rmnet_sdio_disconnect_work(struct work_struct *w)
{
        /* REVISIT: Push all the data to sdio if anything is pending */
}

static void rmnet_sdio_suspend(struct usb_function *f)
{
        struct rmnet_sdio_dev *dev = container_of(f, struct rmnet_sdio_dev,
                        function);

        if (!atomic_read(&dev->online))
                return;
        /* This is a workaround for a Windows host bug during suspend.
         * Windows 7/XP hosts are supposed to drop DTR when the host is
         * suspended. Since that is not being done, drop DTR explicitly
         * from the function driver's suspend handler.
         */
        dev->cbits_to_modem &= ~TIOCM_DTR;
        queue_work(dev->wq, &dev->set_modem_ctl_bits_work);
}

static void rmnet_sdio_disable(struct usb_function *f)
{
        struct rmnet_sdio_dev *dev = container_of(f, struct rmnet_sdio_dev,
                        function);

        if (!atomic_read(&dev->online))
                return;

        usb_ep_disable(dev->epnotify);
        usb_ep_disable(dev->epout);
        usb_ep_disable(dev->epin);

        atomic_set(&dev->online, 0);
        atomic_set(&dev->notify_count, 0);
        rmnet_sdio_free_buf(dev);

        /* cleanup work */
        queue_work(dev->wq, &dev->disconnect_work);
        dev->cbits_to_modem = 0;
        queue_work(dev->wq, &dev->set_modem_ctl_bits_work);
}

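/*
 * Runs when either SDIO channel reports a close event: tell the host the
 * link dropped via a NETWORK_CONNECTION notification, flush the endpoints,
 * and release every queued request, QMI buffer and skb so a later channel
 * re-open starts from a clean slate.
 */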
static void rmnet_close_sdio_work(struct work_struct *w)
{
        struct rmnet_sdio_dev *dev;
        unsigned long flags;
        struct usb_cdc_notification *event;
        int status;
        struct rmnet_sdio_qmi_buf *qmi;
        struct usb_request *req;
        struct sk_buff *skb;

        pr_debug("%s:\n", __func__);

        dev = container_of(w, struct rmnet_sdio_dev, sdio_close_work);

        if (!atomic_read(&dev->online))
                return;

        usb_ep_fifo_flush(dev->epnotify);

        spin_lock_irqsave(&dev->lock, flags);
        event = dev->notify_req->buf;

        event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
                        | USB_RECIP_INTERFACE;
        event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
        event->wValue = cpu_to_le16(0);
        event->wIndex = cpu_to_le16(dev->ifc_id);
        event->wLength = cpu_to_le16(0);
        spin_unlock_irqrestore(&dev->lock, flags);

        status = usb_ep_queue(dev->epnotify, dev->notify_req, GFP_KERNEL);
        if (status < 0) {
                if (!atomic_read(&dev->online))
                        return;
                pr_err("%s: rmnet notify ep enqueue error %d\n",
                                __func__, status);
        }

        usb_ep_fifo_flush(dev->epout);
        usb_ep_fifo_flush(dev->epin);
        cancel_work_sync(&dev->data_rx_work);

        spin_lock_irqsave(&dev->lock, flags);

        if (!atomic_read(&dev->online)) {
                spin_unlock_irqrestore(&dev->lock, flags);
                return;
        }

        /* free all usb requests in tx pool */
        while (!list_empty(&dev->tx_idle)) {
                req = list_first_entry(&dev->tx_idle, struct usb_request, list);
                list_del(&req->list);
                dev->tx_idle_len--;
                req->buf = NULL;
                rmnet_sdio_free_req(dev->epin, req);
        }

        /* free all usb requests in rx pool */
        while (!list_empty(&dev->rx_idle)) {
                req = list_first_entry(&dev->rx_idle, struct usb_request, list);
                list_del(&req->list);
                dev->rx_idle_len--;
                req->buf = NULL;
                rmnet_sdio_free_req(dev->epout, req);
        }

        /* free all buffers in qmi request pool */
        while (!list_empty(&dev->qmi_req_q)) {
                qmi = list_first_entry(&dev->qmi_req_q,
                                struct rmnet_sdio_qmi_buf, list);
                list_del(&qmi->list);
                dev->qreq_q_len--;
                rmnet_sdio_free_qmi(qmi);
        }

        /* free all buffers in qmi response pool */
        while (!list_empty(&dev->qmi_resp_q)) {
                qmi = list_first_entry(&dev->qmi_resp_q,
                                struct rmnet_sdio_qmi_buf, list);
                list_del(&qmi->list);
                dev->qresp_q_len--;
                rmnet_sdio_free_qmi(qmi);
        }
        atomic_set(&dev->notify_count, 0);

        pr_info("%s: setting notify count to zero\n", __func__);

        while ((skb = __skb_dequeue(&dev->tx_skb_queue)))
                dev_kfree_skb_any(skb);

        while ((skb = __skb_dequeue(&dev->rx_skb_queue)))
                dev_kfree_skb_any(skb);
        spin_unlock_irqrestore(&dev->lock, flags);
}

static int rmnet_sdio_start_io(struct rmnet_sdio_dev *dev)
{
        struct usb_request *req;
        int ret, i;
        unsigned long flags;

        spin_lock_irqsave(&dev->lock, flags);
        if (!atomic_read(&dev->online)) {
                spin_unlock_irqrestore(&dev->lock, flags);
                return 0;
        }

        if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status) ||
                !test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status)) {
                spin_unlock_irqrestore(&dev->lock, flags);
                return 0;
        }

        for (i = 0; i < RMNET_SDIO_RX_REQ_MAX; i++) {
                req = rmnet_sdio_alloc_req(dev->epout, 0, GFP_ATOMIC);
                if (IS_ERR(req)) {
                        ret = PTR_ERR(req);
                        spin_unlock_irqrestore(&dev->lock, flags);
                        goto free_buf;
                }
                req->complete = rmnet_sdio_complete_epout;
                list_add_tail(&req->list, &dev->rx_idle);
                dev->rx_idle_len++;
        }
        for (i = 0; i < RMNET_SDIO_TX_REQ_MAX; i++) {
                req = rmnet_sdio_alloc_req(dev->epin, 0, GFP_ATOMIC);
                if (IS_ERR(req)) {
                        ret = PTR_ERR(req);
                        spin_unlock_irqrestore(&dev->lock, flags);
                        goto free_buf;
                }
                req->complete = rmnet_sdio_complete_epin;
                list_add_tail(&req->list, &dev->tx_idle);
                dev->tx_idle_len++;
        }
        spin_unlock_irqrestore(&dev->lock, flags);

        /* Queue Rx data requests */
        rmnet_sdio_start_rx(dev);

        return 0;

free_buf:
        rmnet_sdio_free_buf(dev);
        dev->epout = dev->epin = dev->epnotify = NULL; /* release endpoints */
        return ret;
}

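/*
 * The SDIO channels come up asynchronously with respect to USB enumeration,
 * so opening is retried every 2 seconds, up to 90 attempts (~3 minutes),
 * until both the cmux control and dmux data channels are available.
 */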
#define RMNET_SDIO_OPEN_RETRY_DELAY     msecs_to_jiffies(2000)
#define RMNET_SDIO_OPEN_MAX_RETRY       90
static void rmnet_open_sdio_work(struct work_struct *w)
{
        struct rmnet_sdio_dev *dev =
                container_of(w, struct rmnet_sdio_dev,
                                sdio_open_work.work);
        struct usb_composite_dev *cdev = dev->cdev;
        int ret;
        static int retry_cnt;

        if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status)) {
                /* Control channel for QMI messages */
                ret = sdio_cmux_open(rmnet_sdio_ctl_ch,
                                rmnet_sdio_ctl_receive_cb,
                                rmnet_sdio_ctl_write_done,
                                rmnet_sdio_sts_callback, dev);
                if (!ret)
                        set_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status);
        }

        if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status)) {
                /* Data channel for network packets */
                ret = msm_sdio_dmux_open(rmnet_sdio_data_ch, dev,
                                rmnet_sdio_data_receive_cb,
                                rmnet_sdio_data_write_done);
                if (!ret)
                        set_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status);
        }

        if (test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status) &&
                test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status)) {
                rmnet_sdio_start_io(dev);

                /* if usb cable is connected, update DTR status to modem */
                if (atomic_read(&dev->online))
                        queue_work(dev->wq, &dev->set_modem_ctl_bits_work);

                pr_info("%s: usb rmnet sdio channels are open retry_cnt:%d\n",
                                __func__, retry_cnt);
                retry_cnt = 0;
                return;
        }

        retry_cnt++;
        pr_debug("%s: usb rmnet sdio open retry_cnt:%d\n",
                        __func__, retry_cnt);

        if (retry_cnt > RMNET_SDIO_OPEN_MAX_RETRY) {
                if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status))
                        ERROR(cdev, "Unable to open control SDIO channel\n");

                if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status))
                        ERROR(cdev, "Unable to open DATA SDIO channel\n");

        } else {
                queue_delayed_work(dev->wq, &dev->sdio_open_work,
                                RMNET_SDIO_OPEN_RETRY_DELAY);
        }
}

static int rmnet_sdio_set_alt(struct usb_function *f,
                unsigned intf, unsigned alt)
{
        struct rmnet_sdio_dev *dev = container_of(f, struct rmnet_sdio_dev,
                        function);
        struct usb_composite_dev *cdev = dev->cdev;
        int ret = 0;

        /* Enable epin */
        dev->epin->driver_data = dev;
        ret = config_ep_by_speed(cdev->gadget, f, dev->epin);
        if (ret) {
                dev->epin->desc = NULL;
                ERROR(cdev, "config_ep_by_speed fails for ep %s, result %d\n",
                                dev->epin->name, ret);
                return ret;
        }
        ret = usb_ep_enable(dev->epin);
        if (ret) {
                ERROR(cdev, "can't enable %s, result %d\n",
                                dev->epin->name, ret);
                return ret;
        }

        /* Enable epout */
        dev->epout->driver_data = dev;
        ret = config_ep_by_speed(cdev->gadget, f, dev->epout);
        if (ret) {
                dev->epout->desc = NULL;
                ERROR(cdev, "config_ep_by_speed fails for ep %s, result %d\n",
                                dev->epout->name, ret);
                usb_ep_disable(dev->epin);
                return ret;
        }
        ret = usb_ep_enable(dev->epout);
        if (ret) {
                ERROR(cdev, "can't enable %s, result %d\n",
                                dev->epout->name, ret);
                usb_ep_disable(dev->epin);
                return ret;
        }

        /* Enable epnotify */
        ret = config_ep_by_speed(cdev->gadget, f, dev->epnotify);
        if (ret) {
                dev->epnotify->desc = NULL;
                ERROR(cdev, "config_ep_by_speed fails for ep %s, result %d\n",
                                dev->epnotify->name, ret);
                usb_ep_disable(dev->epin);
                usb_ep_disable(dev->epout);
                return ret;
        }
        ret = usb_ep_enable(dev->epnotify);
        if (ret) {
                ERROR(cdev, "can't enable %s, result %d\n",
                                dev->epnotify->name, ret);
                usb_ep_disable(dev->epin);
                usb_ep_disable(dev->epout);
                return ret;
        }

        /* allocate notification */
        dev->notify_req = rmnet_sdio_alloc_req(dev->epnotify,
                        RMNET_SDIO_MAX_NFY_SZE, GFP_ATOMIC);

        if (IS_ERR(dev->notify_req)) {
                ret = PTR_ERR(dev->notify_req);
                pr_err("%s: unable to allocate memory for notify ep\n",
                                __func__);
                return ret;
        }
        dev->notify_req->complete = rmnet_sdio_notify_complete;
        dev->notify_req->context = dev;
        dev->notify_req->length = RMNET_SDIO_MAX_NFY_SZE;

        atomic_set(&dev->online, 1);

        ret = rmnet_sdio_start_io(dev);

        return ret;
}

static int rmnet_sdio_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct rmnet_sdio_dev *dev = container_of(f, struct rmnet_sdio_dev,
						function);
	int id;
	struct usb_ep *ep;

	dev->cdev = cdev;

	/* allocate interface ID */
	id = usb_interface_id(c, f);
	if (id < 0)
		return id;
	dev->ifc_id = id;
	rmnet_sdio_interface_desc.bInterfaceNumber = id;

	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_sdio_fs_in_desc);
	if (!ep)
		goto out;
	ep->driver_data = cdev; /* claim endpoint */
	dev->epin = ep;

	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_sdio_fs_out_desc);
	if (!ep)
		goto out;
	ep->driver_data = cdev; /* claim endpoint */
	dev->epout = ep;

	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_sdio_fs_notify_desc);
	if (!ep)
		goto out;
	ep->driver_data = cdev; /* claim endpoint */
	dev->epnotify = ep;

	/* support all relevant hardware speeds... we expect that when
	 * hardware is dual speed, all bulk-capable endpoints work at
	 * both speeds
	 */
	if (gadget_is_dualspeed(c->cdev->gadget)) {
		rmnet_sdio_hs_in_desc.bEndpointAddress =
			rmnet_sdio_fs_in_desc.bEndpointAddress;
		rmnet_sdio_hs_out_desc.bEndpointAddress =
			rmnet_sdio_fs_out_desc.bEndpointAddress;
		rmnet_sdio_hs_notify_desc.bEndpointAddress =
			rmnet_sdio_fs_notify_desc.bEndpointAddress;
	}

	queue_delayed_work(dev->wq, &dev->sdio_open_work, 0);

	return 0;

out:
	if (dev->epnotify)
		dev->epnotify->driver_data = NULL;
	if (dev->epout)
		dev->epout->driver_data = NULL;
	if (dev->epin)
		dev->epin->driver_data = NULL;

	return -ENODEV;
}

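/*
 * Teardown mirrors bind: the deferred open work is cancelled before the
 * workqueue is destroyed so no handler can run against a half-torn-down
 * device, any SDIO channel the open work managed to bring up is closed,
 * and the debugfs entries are removed before the device state is freed.
 */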
static void
rmnet_sdio_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct rmnet_sdio_dev *dev = container_of(f, struct rmnet_sdio_dev,
						function);

	cancel_delayed_work_sync(&dev->sdio_open_work);
	destroy_workqueue(dev->wq);

	dev->epout = dev->epin = dev->epnotify = NULL; /* release endpoints */

	if (test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status)) {
		msm_sdio_dmux_close(rmnet_sdio_data_ch);
		clear_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status);
	}

	if (test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status)) {
		sdio_cmux_close(rmnet_sdio_ctl_ch);
		clear_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status);
	}

	debugfs_remove_recursive(dev->dent);

	kfree(dev);
}

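/*
 * Runtime counters are exported through debugfs.  A minimal usage sketch,
 * assuming debugfs is mounted at /sys/kernel/debug (the mount point is
 * system dependent):
 *
 *	cat /sys/kernel/debug/usb_rmnet_sdio/status	 # dump counters
 *	echo 0 > /sys/kernel/debug/usb_rmnet_sdio/status # reset counters
 *
 * Any write resets the counters; the value written is ignored.
 */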
#if defined(CONFIG_DEBUG_FS)
static ssize_t rmnet_sdio_read_stats(struct file *file, char __user *ubuf,
		size_t count, loff_t *ppos)
{
	struct rmnet_sdio_dev *dev = file->private_data;
	char *buf;
	unsigned long flags;
	int ret;

	buf = kzalloc(1024, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	spin_lock_irqsave(&dev->lock, flags);
	/* bound scnprintf by the actual buffer size, not PAGE_SIZE */
	ret = scnprintf(buf, 1024,
		"-*-DATA-*-\n"
		"dpkts_tohost:%lu epInPool:%u tx_size:%u drp_cnt:%lu\n"
		"dpkts_tomodem:%lu epOutPool:%u rx_size:%u pending:%u\n"
		"-*-QMI-*-\n"
		"cpkts_tomodem:%lu qmi_req_q:%u cbits:%d\n"
		"cpkts_tolaptop:%lu qmi_resp_q:%u notify_cnt:%d\n"
		"-*-MISC-*-\n"
		"data_ch_status: %lu ctrl_ch_status: %lu\n",
		/* data */
		dev->dpkt_tolaptop, dev->tx_idle_len,
		dev->tx_skb_queue.qlen, dev->tx_drp_cnt,
		dev->dpkt_tomodem, dev->rx_idle_len,
		dev->rx_skb_queue.qlen, dev->dpkts_pending_atdmux,
		/* qmi */
		dev->cpkt_tomodem, dev->qreq_q_len,
		dev->cbits_to_modem,
		dev->cpkt_tolaptop, dev->qresp_q_len,
		atomic_read(&dev->notify_count),
		/* misc */
		dev->data_ch_status, dev->ctrl_ch_status);
	spin_unlock_irqrestore(&dev->lock, flags);

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, ret);

	kfree(buf);

	return ret;
}

static ssize_t rmnet_sdio_reset_stats(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	struct rmnet_sdio_dev *dev = file->private_data;
	unsigned long flags;

	/* take dev->lock, as the read path does, so a concurrent
	 * status read sees a consistent snapshot
	 */
	spin_lock_irqsave(&dev->lock, flags);
	dev->dpkt_tolaptop = 0;
	dev->dpkt_tomodem = 0;
	dev->cpkt_tolaptop = 0;
	dev->cpkt_tomodem = 0;
	dev->dpkts_pending_atdmux = 0;
	dev->tx_drp_cnt = 0;
	spin_unlock_irqrestore(&dev->lock, flags);

	/* TBD: resetting the skb queue lengths might have side
	 * effects, so they are deliberately left untouched
	 */

	return count;
}

static int debug_rmnet_sdio_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;

	return 0;
}

static const struct file_operations debug_rmnet_sdio_stats_ops = {
	.open = debug_rmnet_sdio_open,
	.read = rmnet_sdio_read_stats,
	.write = rmnet_sdio_reset_stats,
};

static void rmnet_sdio_debugfs_init(struct rmnet_sdio_dev *dev)
{
	dev->dent = debugfs_create_dir("usb_rmnet_sdio", NULL);
	/* debugfs_create_dir() may return NULL or an ERR_PTR on failure;
	 * normalize to NULL so debugfs_remove_recursive() in unbind is safe
	 */
	if (IS_ERR_OR_NULL(dev->dent)) {
		dev->dent = NULL;
		return;
	}

	debugfs_create_file("status", 0444, dev->dent, dev,
			&debug_rmnet_sdio_stats_ops);
}
#else
static void rmnet_sdio_debugfs_init(struct rmnet_sdio_dev *dev)
{
}
#endif

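/*
 * rmnet_sdio_function_add() is the entry point for composite drivers: it
 * allocates the device state, a dedicated single-threaded workqueue and
 * all work items, wires up the usb_function callbacks, and registers the
 * function with the given configuration.  Everything allocated here is
 * released in rmnet_sdio_unbind().
 */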
int rmnet_sdio_function_add(struct usb_configuration *c)
{
	struct rmnet_sdio_dev *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->wq = create_singlethread_workqueue("k_rmnet_work");
	if (!dev->wq) {
		ret = -ENOMEM;
		goto free_dev;
	}

	spin_lock_init(&dev->lock);
	atomic_set(&dev->notify_count, 0);
	atomic_set(&dev->online, 0);

	INIT_WORK(&dev->disconnect_work, rmnet_sdio_disconnect_work);
	INIT_WORK(&dev->set_modem_ctl_bits_work, rmnet_sdio_set_modem_cbits_w);

	INIT_WORK(&dev->ctl_rx_work, rmnet_sdio_control_rx_work);
	INIT_WORK(&dev->data_rx_work, rmnet_sdio_data_rx_work);

	INIT_DELAYED_WORK(&dev->sdio_open_work, rmnet_open_sdio_work);
	INIT_WORK(&dev->sdio_close_work, rmnet_close_sdio_work);

	INIT_LIST_HEAD(&dev->qmi_req_q);
	INIT_LIST_HEAD(&dev->qmi_resp_q);

	INIT_LIST_HEAD(&dev->rx_idle);
	INIT_LIST_HEAD(&dev->tx_idle);
	skb_queue_head_init(&dev->tx_skb_queue);
	skb_queue_head_init(&dev->rx_skb_queue);

	dev->function.name = "rmnet_sdio";
	dev->function.strings = rmnet_sdio_strings;
	dev->function.descriptors = rmnet_sdio_fs_function;
	dev->function.hs_descriptors = rmnet_sdio_hs_function;
	dev->function.bind = rmnet_sdio_bind;
	dev->function.unbind = rmnet_sdio_unbind;
	dev->function.setup = rmnet_sdio_setup;
	dev->function.set_alt = rmnet_sdio_set_alt;
	dev->function.disable = rmnet_sdio_disable;
	dev->function.suspend = rmnet_sdio_suspend;

	ret = usb_add_function(c, &dev->function);
	if (ret)
		goto free_wq;

	rmnet_sdio_debugfs_init(dev);

	return 0;

free_wq:
	destroy_workqueue(dev->wq);
free_dev:
	kfree(dev);

	return ret;
}
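
/*
 * A minimal sketch of how a composite driver might pull this function
 * into one of its configurations.  The names my_rmnet_config_bind and
 * my_config are illustrative, not part of this driver:
 *
 *	static int my_rmnet_config_bind(struct usb_configuration *c)
 *	{
 *		return rmnet_sdio_function_add(c);
 *	}
 *
 *	// from the composite driver's bind callback:
 *	ret = usb_add_config(cdev, &my_config, my_rmnet_config_bind);
 */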