/*
 * f_rmnet_smd_sdio.c -- RmNet SMD & SDIO function driver
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
 * Copyright (C) 2008 Nokia Corporation
 * Copyright (c) 2011 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/ratelimit.h>

#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>

#include <linux/usb/cdc.h>
#include <linux/usb/composite.h>
#include <linux/usb/ch9.h>
#include <linux/termios.h>
#include <linux/debugfs.h>

#include <mach/msm_smd.h>
#include <mach/sdio_cmux.h>
#include <mach/sdio_dmux.h>
#include <mach/usb_gadget_xport.h>

#ifdef CONFIG_RMNET_SMD_SDIO_CTL_CHANNEL
static uint32_t rmnet_mux_sdio_ctl_ch = CONFIG_RMNET_SMD_SDIO_CTL_CHANNEL;
#else
static uint32_t rmnet_mux_sdio_ctl_ch;
#endif
module_param(rmnet_mux_sdio_ctl_ch, uint, S_IRUGO);
MODULE_PARM_DESC(rmnet_mux_sdio_ctl_ch, "RmNetMUX control SDIO channel ID");

#ifdef CONFIG_RMNET_SMD_SDIO_DATA_CHANNEL
static uint32_t rmnet_mux_sdio_data_ch = CONFIG_RMNET_SMD_SDIO_DATA_CHANNEL;
#else
static uint32_t rmnet_mux_sdio_data_ch;
#endif
module_param(rmnet_mux_sdio_data_ch, uint, S_IRUGO);
MODULE_PARM_DESC(rmnet_mux_sdio_data_ch, "RmNetMUX data SDIO channel ID");

#ifdef CONFIG_RMNET_SDIO_SMD_DATA_CHANNEL
static char *rmnet_mux_smd_data_ch = CONFIG_RMNET_SDIO_SMD_DATA_CHANNEL;
#else
static char *rmnet_mux_smd_data_ch;
#endif
module_param(rmnet_mux_smd_data_ch, charp, S_IRUGO);
MODULE_PARM_DESC(rmnet_mux_smd_data_ch, "RmNetMUX data SMD channel");

#define RMNET_MUX_ACM_CTRL_DTR			(1 << 0)

#define RMNET_MUX_SDIO_HDR			8
#define RMNET_MUX_SDIO_NOTIFY_INTERVAL		5
#define RMNET_MUX_SDIO_MAX_NFY_SZE	sizeof(struct usb_cdc_notification)

#define RMNET_MUX_SDIO_RX_REQ_MAX		16
#define RMNET_MUX_SDIO_RX_REQ_SIZE		2048
#define RMNET_MUX_SDIO_TX_REQ_MAX		100

#define RMNET_MUX_SDIO_TX_LIMIT			1000
#define RMNET_MUX_SDIO_RX_ENABLE_LIMIT		1000
#define RMNET_MUX_SDIO_RX_DISABLE_LIMIT		500

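/*
 * Flow control: each uplink packet handed to the SDIO DMUX is counted
 * in dpkts_pending_atdmux. Once that count reaches the enable
 * threshold, completed OUT requests are parked on rx_idle instead of
 * being re-submitted; RX resumes when the count drains below the
 * disable threshold. Downlink skbs queued beyond the drop threshold
 * are discarded.
 */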
static uint32_t mux_sdio_tx_pkt_drop_thld = RMNET_MUX_SDIO_TX_LIMIT;
module_param(mux_sdio_tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR);

static uint32_t mux_sdio_rx_fctrl_en_thld = RMNET_MUX_SDIO_RX_ENABLE_LIMIT;
module_param(mux_sdio_rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);

static uint32_t mux_sdio_rx_fctrl_dis_thld = RMNET_MUX_SDIO_RX_DISABLE_LIMIT;
module_param(mux_sdio_rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);

#define RMNET_MUX_SMD_RX_REQ_MAX		8
#define RMNET_MUX_SMD_RX_REQ_SIZE		2048
#define RMNET_MUX_SMD_TX_REQ_MAX		8
#define RMNET_MUX_SMD_TX_REQ_SIZE		2048
#define RMNET_MUX_SMD_TXN_MAX			2048

struct rmnet_mux_ctrl_pkt {
        void *buf;
        int len;
        struct list_head list;
};

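/*
 * Control-path state. tx_q buffers QMI messages received from the host
 * (SEND_ENCAPSULATED_COMMAND) until the userspace reader drains them;
 * rx_q buffers modem responses until the host fetches them with
 * GET_ENCAPSULATED_RESPONSE.
 */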
struct rmnet_mux_ctrl_dev {
        struct list_head tx_q;
        wait_queue_head_t tx_wait_q;
        unsigned long tx_len;

        struct list_head rx_q;
        unsigned long rx_len;

        unsigned long cbits_to_modem;

        unsigned opened;
};

struct rmnet_mux_sdio_dev {
        /* Tx/Rx lists */
        struct list_head tx_idle;
        struct sk_buff_head tx_skb_queue;
        struct list_head rx_idle;
        struct sk_buff_head rx_skb_queue;

        struct work_struct data_rx_work;

        struct delayed_work open_work;
        atomic_t sdio_open;

        unsigned int dpkts_pending_atdmux;
};

/* Data SMD channel */
struct rmnet_mux_smd_info {
        struct smd_channel *ch;
        struct tasklet_struct tx_tlet;
        struct tasklet_struct rx_tlet;
#define RMNET_MUX_CH_OPENED 0
        unsigned long flags;
        /* pending rx packet length */
        atomic_t rx_pkt;
        /* wait for smd open event */
        wait_queue_head_t wait;
};

struct rmnet_mux_smd_dev {
        /* Tx/Rx lists */
        struct list_head tx_idle;
        struct list_head rx_idle;
        struct list_head rx_queue;

        struct rmnet_mux_smd_info smd_data;
};

struct rmnet_mux_dev {
        struct usb_function function;
        struct usb_composite_dev *cdev;

        struct usb_ep *epout;
        struct usb_ep *epin;
        struct usb_ep *epnotify;
        struct usb_request *notify_req;

        struct rmnet_mux_smd_dev smd_dev;
        struct rmnet_mux_sdio_dev sdio_dev;
        struct rmnet_mux_ctrl_dev ctrl_dev;

        u8 ifc_id;
        enum transport_type xport;
        spinlock_t lock;
        atomic_t online;
        atomic_t notify_count;
        struct workqueue_struct *wq;
        struct work_struct disconnect_work;

        /* pkt counters */
        unsigned long dpkts_tomsm;
        unsigned long dpkts_tomdm;
        unsigned long dpkts_tolaptop;
        unsigned long tx_drp_cnt;
        unsigned long cpkts_tolaptop;
        unsigned long cpkts_tomdm;
        unsigned long cpkts_drp_cnt;
};

static struct rmnet_mux_dev *rmux_dev;

static struct usb_interface_descriptor rmnet_mux_interface_desc = {
        .bLength =              USB_DT_INTERFACE_SIZE,
        .bDescriptorType =      USB_DT_INTERFACE,
        .bNumEndpoints =        3,
        .bInterfaceClass =      USB_CLASS_VENDOR_SPEC,
        .bInterfaceSubClass =   USB_CLASS_VENDOR_SPEC,
        .bInterfaceProtocol =   USB_CLASS_VENDOR_SPEC,
};

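/*
 * RmNet enumerates as a vendor-specific interface with three
 * endpoints: an interrupt IN endpoint for CDC-style notifications and
 * a bulk IN/OUT pair for data, described below for both full and high
 * speed.
 */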
/* Full speed support */
static struct usb_endpoint_descriptor rmnet_mux_fs_notify_desc = {
        .bLength =              USB_DT_ENDPOINT_SIZE,
        .bDescriptorType =      USB_DT_ENDPOINT,
        .bEndpointAddress =     USB_DIR_IN,
        .bmAttributes =         USB_ENDPOINT_XFER_INT,
        .wMaxPacketSize = __constant_cpu_to_le16(RMNET_MUX_SDIO_MAX_NFY_SZE),
        .bInterval =            1 << RMNET_MUX_SDIO_NOTIFY_INTERVAL,
};

static struct usb_endpoint_descriptor rmnet_mux_fs_in_desc = {
        .bLength =              USB_DT_ENDPOINT_SIZE,
        .bDescriptorType =      USB_DT_ENDPOINT,
        .bEndpointAddress =     USB_DIR_IN,
        .bmAttributes =         USB_ENDPOINT_XFER_BULK,
        .wMaxPacketSize =       __constant_cpu_to_le16(64),
};

static struct usb_endpoint_descriptor rmnet_mux_fs_out_desc = {
        .bLength =              USB_DT_ENDPOINT_SIZE,
        .bDescriptorType =      USB_DT_ENDPOINT,
        .bEndpointAddress =     USB_DIR_OUT,
        .bmAttributes =         USB_ENDPOINT_XFER_BULK,
        .wMaxPacketSize =       __constant_cpu_to_le16(64),
};

static struct usb_descriptor_header *rmnet_mux_fs_function[] = {
        (struct usb_descriptor_header *) &rmnet_mux_interface_desc,
        (struct usb_descriptor_header *) &rmnet_mux_fs_notify_desc,
        (struct usb_descriptor_header *) &rmnet_mux_fs_in_desc,
        (struct usb_descriptor_header *) &rmnet_mux_fs_out_desc,
        NULL,
};

/* High speed support */
static struct usb_endpoint_descriptor rmnet_mux_hs_notify_desc = {
        .bLength =              USB_DT_ENDPOINT_SIZE,
        .bDescriptorType =      USB_DT_ENDPOINT,
        .bEndpointAddress =     USB_DIR_IN,
        .bmAttributes =         USB_ENDPOINT_XFER_INT,
        .wMaxPacketSize = __constant_cpu_to_le16(RMNET_MUX_SDIO_MAX_NFY_SZE),
        .bInterval =            RMNET_MUX_SDIO_NOTIFY_INTERVAL + 4,
};

static struct usb_endpoint_descriptor rmnet_mux_hs_in_desc = {
        .bLength =              USB_DT_ENDPOINT_SIZE,
        .bDescriptorType =      USB_DT_ENDPOINT,
        .bEndpointAddress =     USB_DIR_IN,
        .bmAttributes =         USB_ENDPOINT_XFER_BULK,
        .wMaxPacketSize =       __constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor rmnet_mux_hs_out_desc = {
        .bLength =              USB_DT_ENDPOINT_SIZE,
        .bDescriptorType =      USB_DT_ENDPOINT,
        .bEndpointAddress =     USB_DIR_OUT,
        .bmAttributes =         USB_ENDPOINT_XFER_BULK,
        .wMaxPacketSize =       __constant_cpu_to_le16(512),
};

static struct usb_descriptor_header *rmnet_mux_hs_function[] = {
        (struct usb_descriptor_header *) &rmnet_mux_interface_desc,
        (struct usb_descriptor_header *) &rmnet_mux_hs_notify_desc,
        (struct usb_descriptor_header *) &rmnet_mux_hs_in_desc,
        (struct usb_descriptor_header *) &rmnet_mux_hs_out_desc,
        NULL,
};

/* String descriptors */

static struct usb_string rmnet_mux_string_defs[] = {
        [0].s = "RmNet",
        { } /* end of list */
};

static struct usb_gadget_strings rmnet_mux_string_table = {
        .language =     0x0409, /* en-us */
        .strings =      rmnet_mux_string_defs,
};

static struct usb_gadget_strings *rmnet_mux_strings[] = {
        &rmnet_mux_string_table,
        NULL,
};

static struct rmnet_mux_ctrl_pkt *rmnet_mux_alloc_ctrl_pkt(unsigned len,
                gfp_t flags)
{
        struct rmnet_mux_ctrl_pkt *cpkt;

        cpkt = kzalloc(sizeof(struct rmnet_mux_ctrl_pkt), flags);
        if (!cpkt)
                return NULL;

        cpkt->buf = kzalloc(len, flags);
        if (!cpkt->buf) {
                kfree(cpkt);
                return NULL;
        }

        cpkt->len = len;

        return cpkt;
}

static void rmnet_mux_free_ctrl_pkt(struct rmnet_mux_ctrl_pkt *cpkt)
{
        kfree(cpkt->buf);
        kfree(cpkt);
}

/*
 * Allocate a usb_request and its buffer. Returns a pointer to the
 * usb_request, or ERR_PTR(-ENOMEM) if either allocation fails.
 */
static struct usb_request *
rmnet_mux_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
{
        struct usb_request *req;

        req = usb_ep_alloc_request(ep, kmalloc_flags);

        if (len && req != NULL) {
                req->length = len;
                req->buf = kmalloc(len, kmalloc_flags);
                if (req->buf == NULL) {
                        usb_ep_free_request(ep, req);
                        req = NULL;
                }
        }

        return req ? req : ERR_PTR(-ENOMEM);
}

/*
 * Free a usb_request and its buffer.
 */
static void rmnet_mux_free_req(struct usb_ep *ep, struct usb_request *req)
{
        kfree(req->buf);
        usb_ep_free_request(ep, req);
}

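/*
 * Each OUT request borrows the data area of a freshly allocated skb so
 * that a completed packet can be handed to the SDIO DMUX without an
 * extra copy; RMNET_MUX_SDIO_HDR bytes are reserved up front for the
 * mux header.
 */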
static int rmnet_mux_sdio_rx_submit(struct rmnet_mux_dev *dev,
                struct usb_request *req, gfp_t gfp_flags)
{
        struct sk_buff *skb;
        int retval;

        skb = alloc_skb(RMNET_MUX_SDIO_RX_REQ_SIZE + RMNET_MUX_SDIO_HDR,
                        gfp_flags);
        if (skb == NULL)
                return -ENOMEM;
        skb_reserve(skb, RMNET_MUX_SDIO_HDR);

        req->buf = skb->data;
        req->length = RMNET_MUX_SDIO_RX_REQ_SIZE;
        req->context = skb;

        retval = usb_ep_queue(dev->epout, req, gfp_flags);
        if (retval)
                dev_kfree_skb_any(skb);

        return retval;
}

static void rmnet_mux_sdio_start_rx(struct rmnet_mux_dev *dev)
{
        struct rmnet_mux_sdio_dev *sdio_dev = &dev->sdio_dev;
        struct usb_composite_dev *cdev = dev->cdev;
        int status;
        struct usb_request *req;
        struct list_head *pool;
        unsigned long flags;

        if (!atomic_read(&dev->online)) {
                pr_debug("%s: USB not connected\n", __func__);
                return;
        }

        spin_lock_irqsave(&dev->lock, flags);
        pool = &sdio_dev->rx_idle;
        while (!list_empty(pool)) {
                req = list_first_entry(pool, struct usb_request, list);
                list_del(&req->list);

                spin_unlock_irqrestore(&dev->lock, flags);
                status = rmnet_mux_sdio_rx_submit(dev, req, GFP_KERNEL);
                spin_lock_irqsave(&dev->lock, flags);

                if (status) {
                        ERROR(cdev, "rmnet_mux data rx enqueue err %d\n",
                                        status);
                        list_add_tail(&req->list, &sdio_dev->rx_idle);
                        break;
                }
        }
        spin_unlock_irqrestore(&dev->lock, flags);
}

static void rmnet_mux_sdio_start_tx(struct rmnet_mux_dev *dev)
{
        unsigned long flags;
        int status;
        struct sk_buff *skb;
        struct usb_request *req;
        struct rmnet_mux_sdio_dev *sdio_dev = &dev->sdio_dev;
        struct usb_composite_dev *cdev = dev->cdev;

        if (!atomic_read(&dev->online))
                return;

        spin_lock_irqsave(&dev->lock, flags);
        while (!list_empty(&sdio_dev->tx_idle)) {
                skb = __skb_dequeue(&sdio_dev->tx_skb_queue);
                if (!skb) {
                        spin_unlock_irqrestore(&dev->lock, flags);
                        return;
                }

                req = list_first_entry(&sdio_dev->tx_idle,
                                struct usb_request, list);
                req->context = skb;
                req->buf = skb->data;
                req->length = skb->len;

                list_del(&req->list);
                spin_unlock(&dev->lock);
                status = usb_ep_queue(dev->epin, req, GFP_ATOMIC);
                spin_lock(&dev->lock);
                if (status) {
                        /* USB still online, queue requests back */
                        if (atomic_read(&dev->online)) {
                                ERROR(cdev, "rmnet tx data enqueue err %d\n",
                                                status);
                                list_add_tail(&req->list, &sdio_dev->tx_idle);
                                __skb_queue_head(&sdio_dev->tx_skb_queue, skb);
                        } else {
                                req->buf = NULL;
                                rmnet_mux_free_req(dev->epin, req);
                                dev_kfree_skb_any(skb);
                        }
                        break;
                }
                dev->dpkts_tolaptop++;
        }
        spin_unlock_irqrestore(&dev->lock, flags);
}

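/*
 * Downlink entry point: the SDIO DMUX delivers an skb destined for the
 * host. If USB cannot drain fast enough and the backlog exceeds
 * mux_sdio_tx_pkt_drop_thld, the packet is dropped rather than queued
 * without bound.
 */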
static void rmnet_mux_sdio_data_receive_cb(void *priv, struct sk_buff *skb)
{
        struct rmnet_mux_dev *dev = priv;
        struct rmnet_mux_sdio_dev *sdio_dev = &dev->sdio_dev;
        unsigned long flags;

        if (!skb)
                return;
        if (!atomic_read(&dev->online)) {
                dev_kfree_skb_any(skb);
                return;
        }
        spin_lock_irqsave(&dev->lock, flags);
        if (sdio_dev->tx_skb_queue.qlen > mux_sdio_tx_pkt_drop_thld) {
                pr_err_ratelimited("%s: tx pkt dropped: tx_drop_cnt:%lu\n",
                        __func__, dev->tx_drp_cnt);
                dev->tx_drp_cnt++;
                spin_unlock_irqrestore(&dev->lock, flags);
                dev_kfree_skb_any(skb);
                return;
        }
        __skb_queue_tail(&sdio_dev->tx_skb_queue, skb);
        spin_unlock_irqrestore(&dev->lock, flags);
        rmnet_mux_sdio_start_tx(dev);
}

static void rmnet_mux_sdio_data_write_done(void *priv, struct sk_buff *skb)
{
        struct rmnet_mux_dev *dev = priv;
        struct rmnet_mux_sdio_dev *sdio_dev = &dev->sdio_dev;

        if (!skb)
                return;

        dev_kfree_skb_any(skb);
        /* this function is called by the sdio mux with a
         * spin_lock_irqsave already held, so a plain spin_lock
         * is sufficient here
         */
        spin_lock(&dev->lock);
        sdio_dev->dpkts_pending_atdmux--;

        if (sdio_dev->dpkts_pending_atdmux >= mux_sdio_rx_fctrl_dis_thld) {
                spin_unlock(&dev->lock);
                return;
        }
        spin_unlock(&dev->lock);

        rmnet_mux_sdio_start_rx(dev);
}

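/*
 * Uplink worker: drains rx_skb_queue into the SDIO DMUX. The write is
 * deferred to a workqueue so that it runs in process context rather
 * than in the USB completion path.
 */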
static void rmnet_mux_sdio_data_rx_work(struct work_struct *w)
{
        struct rmnet_mux_dev *dev = container_of(w, struct rmnet_mux_dev,
                        sdio_dev.data_rx_work);
        struct rmnet_mux_sdio_dev *sdio_dev = &dev->sdio_dev;
        struct usb_composite_dev *cdev = dev->cdev;

        struct sk_buff *skb;
        int ret;
        unsigned long flags;

        spin_lock_irqsave(&dev->lock, flags);
        while ((skb = __skb_dequeue(&sdio_dev->rx_skb_queue))) {
                spin_unlock_irqrestore(&dev->lock, flags);
                ret = msm_sdio_dmux_write(rmnet_mux_sdio_data_ch, skb);
                spin_lock_irqsave(&dev->lock, flags);
                if (ret < 0) {
                        ERROR(cdev, "rmnet_mux SDIO data write failed\n");
                        dev_kfree_skb_any(skb);
                } else {
                        dev->dpkts_tomdm++;
                        sdio_dev->dpkts_pending_atdmux++;
                }
        }
        spin_unlock_irqrestore(&dev->lock, flags);
}

static void
rmnet_mux_sdio_complete_epout(struct usb_ep *ep, struct usb_request *req)
{
        struct rmnet_mux_dev *dev = ep->driver_data;
        struct rmnet_mux_sdio_dev *sdio_dev = &dev->sdio_dev;
        struct usb_composite_dev *cdev = dev->cdev;
        struct sk_buff *skb = req->context;
        int status = req->status;
        int queue = 0;

        if (dev->xport == USB_GADGET_XPORT_UNDEF) {
                dev_kfree_skb_any(skb);
                req->buf = NULL;
                rmnet_mux_free_req(ep, req);
                return;
        }

        switch (status) {
        case 0:
                /* successful completion */
                skb_put(skb, req->actual);
                queue = 1;
                break;
        case -ECONNRESET:
        case -ESHUTDOWN:
                /* connection gone */
                dev_kfree_skb_any(skb);
                req->buf = NULL;
                rmnet_mux_free_req(ep, req);
                return;
        default:
                /* unexpected failure */
                ERROR(cdev, "RMNET_MUX %s response error %d, %d/%d\n",
                        ep->name, status,
                        req->actual, req->length);
                dev_kfree_skb_any(skb);
                break;
        }

        spin_lock(&dev->lock);
        if (queue) {
                __skb_queue_tail(&sdio_dev->rx_skb_queue, skb);
                queue_work(dev->wq, &sdio_dev->data_rx_work);
        }

        if (sdio_dev->dpkts_pending_atdmux >= mux_sdio_rx_fctrl_en_thld) {
                list_add_tail(&req->list, &sdio_dev->rx_idle);
                spin_unlock(&dev->lock);
                return;
        }
        spin_unlock(&dev->lock);

        status = rmnet_mux_sdio_rx_submit(dev, req, GFP_ATOMIC);
        if (status) {
                ERROR(cdev, "rmnet_mux data rx enqueue err %d\n", status);
                list_add_tail(&req->list, &sdio_dev->rx_idle);
        }
}

static void
rmnet_mux_sdio_complete_epin(struct usb_ep *ep, struct usb_request *req)
{
        struct rmnet_mux_dev *dev = ep->driver_data;
        struct rmnet_mux_sdio_dev *sdio_dev = &dev->sdio_dev;
        struct sk_buff *skb = req->context;
        struct usb_composite_dev *cdev = dev->cdev;
        int status = req->status;

        if (dev->xport == USB_GADGET_XPORT_UNDEF) {
                dev_kfree_skb_any(skb);
                req->buf = NULL;
                rmnet_mux_free_req(ep, req);
                return;
        }

        switch (status) {
        case 0:
                /* successful completion */
        case -ECONNRESET:
        case -ESHUTDOWN:
                /* connection gone */
                break;
        default:
                ERROR(cdev, "rmnet_mux data tx ep error %d\n", status);
                break;
        }

        spin_lock(&dev->lock);
        list_add_tail(&req->list, &sdio_dev->tx_idle);
        spin_unlock(&dev->lock);
        dev_kfree_skb_any(skb);

        rmnet_mux_sdio_start_tx(dev);
}

static int rmnet_mux_sdio_enable(struct rmnet_mux_dev *dev)
{
        struct rmnet_mux_sdio_dev *sdio_dev = &dev->sdio_dev;
        int i;
        struct usb_request *req;

        /*
         * If the memory allocation fails, all the allocated
         * requests will be freed upon cable disconnect.
         */
        for (i = 0; i < RMNET_MUX_SDIO_RX_REQ_MAX; i++) {
                req = rmnet_mux_alloc_req(dev->epout, 0, GFP_KERNEL);
                if (IS_ERR(req))
                        return PTR_ERR(req);
                req->complete = rmnet_mux_sdio_complete_epout;
                list_add_tail(&req->list, &sdio_dev->rx_idle);
        }
        for (i = 0; i < RMNET_MUX_SDIO_TX_REQ_MAX; i++) {
                req = rmnet_mux_alloc_req(dev->epin, 0, GFP_KERNEL);
                if (IS_ERR(req))
                        return PTR_ERR(req);
                req->complete = rmnet_mux_sdio_complete_epin;
                list_add_tail(&req->list, &sdio_dev->tx_idle);
        }

        rmnet_mux_sdio_start_rx(dev);
        return 0;
}

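/*
 * SMD data path. Downlink runs in tx_tlet: complete packets are read
 * out of the SMD channel and queued on the bulk IN endpoint. Uplink
 * packets are written into SMD either directly from the OUT completion
 * handler or, when the channel is full, from rx_tlet once space frees
 * up.
 */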
static void rmnet_mux_smd_start_rx(struct rmnet_mux_dev *dev)
{
        struct usb_composite_dev *cdev = dev->cdev;
        struct rmnet_mux_smd_dev *smd_dev = &dev->smd_dev;
        int status;
        struct usb_request *req;
        struct list_head *pool = &smd_dev->rx_idle;
        unsigned long flags;

        spin_lock_irqsave(&dev->lock, flags);
        while (!list_empty(pool)) {
                req = list_entry(pool->next, struct usb_request, list);
                list_del(&req->list);

                spin_unlock_irqrestore(&dev->lock, flags);
                status = usb_ep_queue(dev->epout, req, GFP_ATOMIC);
                spin_lock_irqsave(&dev->lock, flags);

                if (status) {
                        ERROR(cdev, "rmnet data rx enqueue err %d\n", status);
                        list_add_tail(&req->list, pool);
                        break;
                }
        }
        spin_unlock_irqrestore(&dev->lock, flags);
}

static void rmnet_mux_smd_data_tx_tlet(unsigned long arg)
{
        struct rmnet_mux_dev *dev = (struct rmnet_mux_dev *) arg;
        struct rmnet_mux_smd_dev *smd_dev = &dev->smd_dev;
        struct usb_composite_dev *cdev = dev->cdev;
        struct usb_request *req;
        int status;
        int sz;
        unsigned long flags;

        while (1) {
                if (!atomic_read(&dev->online))
                        break;
                sz = smd_cur_packet_size(smd_dev->smd_data.ch);
                if (sz == 0)
                        break;
                if (smd_read_avail(smd_dev->smd_data.ch) < sz)
                        break;

                spin_lock_irqsave(&dev->lock, flags);
                if (list_empty(&smd_dev->tx_idle)) {
                        spin_unlock_irqrestore(&dev->lock, flags);
                        DBG(cdev, "rmnet_mux data Tx buffers full\n");
                        break;
                }
                req = list_first_entry(&smd_dev->tx_idle,
                                struct usb_request, list);
                list_del(&req->list);
                spin_unlock_irqrestore(&dev->lock, flags);

                req->length = smd_read(smd_dev->smd_data.ch, req->buf, sz);
                status = usb_ep_queue(dev->epin, req, GFP_ATOMIC);
                if (status) {
                        ERROR(cdev, "rmnet tx data enqueue err %d\n", status);
                        spin_lock_irqsave(&dev->lock, flags);
                        list_add_tail(&req->list, &smd_dev->tx_idle);
                        spin_unlock_irqrestore(&dev->lock, flags);
                        break;
                }
                dev->dpkts_tolaptop++;
        }
}

static void rmnet_mux_smd_data_rx_tlet(unsigned long arg)
{
        struct rmnet_mux_dev *dev = (struct rmnet_mux_dev *) arg;
        struct rmnet_mux_smd_dev *smd_dev = &dev->smd_dev;
        struct usb_composite_dev *cdev = dev->cdev;
        struct usb_request *req;
        int ret;
        unsigned long flags;

        spin_lock_irqsave(&dev->lock, flags);
        while (1) {
                if (!atomic_read(&dev->online))
                        break;
                if (list_empty(&smd_dev->rx_queue)) {
                        atomic_set(&smd_dev->smd_data.rx_pkt, 0);
                        break;
                }
                req = list_first_entry(&smd_dev->rx_queue,
                                struct usb_request, list);
                if (smd_write_avail(smd_dev->smd_data.ch) < req->actual) {
                        atomic_set(&smd_dev->smd_data.rx_pkt, req->actual);
                        DBG(cdev, "rmnet_mux SMD data channel full\n");
                        break;
                }

                list_del(&req->list);
                spin_unlock_irqrestore(&dev->lock, flags);
                ret = smd_write(smd_dev->smd_data.ch, req->buf, req->actual);
                spin_lock_irqsave(&dev->lock, flags);
                if (ret != req->actual) {
                        ERROR(cdev, "rmnet_mux SMD data write failed\n");
                        break;
                }
                dev->dpkts_tomsm++;
                list_add_tail(&req->list, &smd_dev->rx_idle);
        }
        spin_unlock_irqrestore(&dev->lock, flags);

        /* We have free rx data requests. */
        rmnet_mux_smd_start_rx(dev);
}

/* If SMD has enough room to accommodate a data rx packet,
 * write into SMD directly. Otherwise enqueue to rx_queue.
 * We will not write into SMD directly until rx_queue is
 * empty, to strictly preserve the ordering of requests.
 */
static void
rmnet_mux_smd_complete_epout(struct usb_ep *ep, struct usb_request *req)
{
        struct rmnet_mux_dev *dev = req->context;
        struct rmnet_mux_smd_dev *smd_dev = &dev->smd_dev;
        struct usb_composite_dev *cdev = dev->cdev;
        int status = req->status;
        int ret;

        if (dev->xport == USB_GADGET_XPORT_UNDEF) {
                rmnet_mux_free_req(ep, req);
                return;
        }

        switch (status) {
        case 0:
                /* normal completion */
                break;
        case -ECONNRESET:
        case -ESHUTDOWN:
                /* connection gone */
                spin_lock(&dev->lock);
                list_add_tail(&req->list, &smd_dev->rx_idle);
                spin_unlock(&dev->lock);
                return;
        default:
                /* unexpected failure */
                ERROR(cdev, "RMNET_MUX %s response error %d, %d/%d\n",
                        ep->name, status,
                        req->actual, req->length);
                spin_lock(&dev->lock);
                list_add_tail(&req->list, &smd_dev->rx_idle);
                spin_unlock(&dev->lock);
                return;
        }

        spin_lock(&dev->lock);
        if (!atomic_read(&smd_dev->smd_data.rx_pkt)) {
                if (smd_write_avail(smd_dev->smd_data.ch) < req->actual) {
                        atomic_set(&smd_dev->smd_data.rx_pkt, req->actual);
                        goto queue_req;
                }
                spin_unlock(&dev->lock);
                ret = smd_write(smd_dev->smd_data.ch, req->buf, req->actual);
                /* This should never happen */
                if (ret != req->actual)
                        ERROR(cdev, "rmnet_mux data smd write failed\n");
                /* Restart Rx */
                dev->dpkts_tomsm++;
                spin_lock(&dev->lock);
                list_add_tail(&req->list, &smd_dev->rx_idle);
                spin_unlock(&dev->lock);
                rmnet_mux_smd_start_rx(dev);
                return;
        }
queue_req:
        list_add_tail(&req->list, &smd_dev->rx_queue);
        spin_unlock(&dev->lock);
}

static void rmnet_mux_smd_complete_epin(struct usb_ep *ep,
                struct usb_request *req)
{
        struct rmnet_mux_dev *dev = req->context;
        struct rmnet_mux_smd_dev *smd_dev = &dev->smd_dev;
        struct usb_composite_dev *cdev = dev->cdev;
        int status = req->status;
        int schedule = 0;

        if (dev->xport == USB_GADGET_XPORT_UNDEF) {
                rmnet_mux_free_req(ep, req);
                return;
        }

        switch (status) {
        case -ECONNRESET:
        case -ESHUTDOWN:
                /* connection gone */
                spin_lock(&dev->lock);
                list_add_tail(&req->list, &smd_dev->tx_idle);
                spin_unlock(&dev->lock);
                break;
        default:
                ERROR(cdev, "rmnet_mux data tx ep error %d\n", status);
                /* FALLTHROUGH */
        case 0:
                spin_lock(&dev->lock);
                if (list_empty(&smd_dev->tx_idle))
                        schedule = 1;
                list_add_tail(&req->list, &smd_dev->tx_idle);

                if (schedule)
                        tasklet_schedule(&smd_dev->smd_data.tx_tlet);
                spin_unlock(&dev->lock);
                break;
        }
}

static void rmnet_mux_smd_notify(void *priv, unsigned event)
{
        struct rmnet_mux_dev *dev = priv;
        struct rmnet_mux_smd_info *smd_info = &dev->smd_dev.smd_data;
        int len = atomic_read(&smd_info->rx_pkt);

        switch (event) {
        case SMD_EVENT_DATA: {
                if (!atomic_read(&dev->online))
                        break;
                if (len && (smd_write_avail(smd_info->ch) >= len))
                        tasklet_schedule(&smd_info->rx_tlet);

                if (smd_read_avail(smd_info->ch))
                        tasklet_schedule(&smd_info->tx_tlet);

                break;
        }
        case SMD_EVENT_OPEN:
                /* usb endpoints are not enabled until smd channels
                 * are opened. wake up the worker thread to continue
                 * connection processing
                 */
                set_bit(RMNET_MUX_CH_OPENED, &smd_info->flags);
                wake_up(&smd_info->wait);
                break;
        case SMD_EVENT_CLOSE:
                /* We should never get here:
                 * reset the flag after closing the smd channel
                 */
                clear_bit(RMNET_MUX_CH_OPENED, &smd_info->flags);
                break;
        }
}

static int rmnet_mux_smd_enable(struct rmnet_mux_dev *dev)
{
        struct usb_composite_dev *cdev = dev->cdev;
        struct rmnet_mux_smd_dev *smd_dev = &dev->smd_dev;
        int i, ret;
        struct usb_request *req;

        if (test_bit(RMNET_MUX_CH_OPENED, &smd_dev->smd_data.flags))
                goto smd_alloc_req;

        ret = smd_open(rmnet_mux_smd_data_ch, &smd_dev->smd_data.ch,
                        dev, rmnet_mux_smd_notify);
        if (ret) {
                ERROR(cdev, "Unable to open data smd channel\n");
                return ret;
        }

        wait_event(smd_dev->smd_data.wait, test_bit(RMNET_MUX_CH_OPENED,
                        &smd_dev->smd_data.flags));

        /* Allocate bulk in/out requests for data transfer.
         * If the memory allocation fails, all the allocated
         * requests will be freed upon cable disconnect.
         */
smd_alloc_req:
        for (i = 0; i < RMNET_MUX_SMD_RX_REQ_MAX; i++) {
                req = rmnet_mux_alloc_req(dev->epout,
                                RMNET_MUX_SMD_RX_REQ_SIZE, GFP_KERNEL);
                if (IS_ERR(req))
                        return PTR_ERR(req);
                req->length = RMNET_MUX_SMD_TXN_MAX;
                req->context = dev;
                req->complete = rmnet_mux_smd_complete_epout;
                list_add_tail(&req->list, &smd_dev->rx_idle);
        }

        for (i = 0; i < RMNET_MUX_SMD_TX_REQ_MAX; i++) {
                req = rmnet_mux_alloc_req(dev->epin,
                                RMNET_MUX_SMD_TX_REQ_SIZE, GFP_KERNEL);
                if (IS_ERR(req))
                        return PTR_ERR(req);
                req->context = dev;
                req->complete = rmnet_mux_smd_complete_epin;
                list_add_tail(&req->list, &smd_dev->tx_idle);
        }

        rmnet_mux_smd_start_rx(dev);
        return 0;
}

static void rmnet_mux_notify_complete(struct usb_ep *ep,
                struct usb_request *req)
{
        struct rmnet_mux_dev *dev = req->context;
        struct usb_composite_dev *cdev = dev->cdev;
        int status = req->status;

        switch (status) {
        case -ECONNRESET:
        case -ESHUTDOWN:
                /* connection gone */
                atomic_set(&dev->notify_count, 0);
                break;
        default:
                ERROR(cdev, "rmnet_mux notifyep error %d\n", status);
                /* FALLTHROUGH */
        case 0:
                if (atomic_dec_and_test(&dev->notify_count))
                        break;

                status = usb_ep_queue(dev->epnotify, req, GFP_ATOMIC);
                if (status) {
                        atomic_dec(&dev->notify_count);
                        ERROR(cdev, "rmnet notify ep enq error %d\n", status);
                }
                break;
        }
}

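/*
 * Tell the host a control response is ready. notify_count coalesces
 * notifications: only the 0-to-1 transition queues an interrupt
 * request, and rmnet_mux_notify_complete() keeps re-queuing it until
 * the count drains back to zero.
 */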
static void ctrl_response_available(struct rmnet_mux_dev *dev)
{
        struct usb_composite_dev *cdev = dev->cdev;
        struct usb_request *req = dev->notify_req;
        struct usb_cdc_notification *event = req->buf;
        int status;

        /* Response will be sent later */
        if (atomic_inc_return(&dev->notify_count) != 1)
                return;

        event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
                        | USB_RECIP_INTERFACE;
        event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
        event->wValue = cpu_to_le16(0);
        event->wIndex = cpu_to_le16(dev->ifc_id);
        event->wLength = cpu_to_le16(0);

        status = usb_ep_queue(dev->epnotify, dev->notify_req, GFP_ATOMIC);
        if (status < 0) {
                atomic_dec(&dev->notify_count);
                ERROR(cdev, "rmnet_mux notify ep enqueue error %d\n", status);
        }
}

#define MAX_CTRL_PKT_SIZE	4096

static void rmnet_mux_response_complete(struct usb_ep *ep,
                struct usb_request *req)
{
        struct rmnet_mux_dev *dev = req->context;
        struct usb_composite_dev *cdev = dev->cdev;

        switch (req->status) {
        case -ECONNRESET:
        case -ESHUTDOWN:
        case 0:
                return;
        default:
                INFO(cdev, "rmnet_mux %s response error %d, %d/%d\n",
                        ep->name, req->status,
                        req->actual, req->length);
        }
}

static void rmnet_mux_command_complete(struct usb_ep *ep,
                struct usb_request *req)
{
        struct rmnet_mux_dev *dev = req->context;
        struct usb_composite_dev *cdev = dev->cdev;
        struct rmnet_mux_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
        struct rmnet_mux_ctrl_pkt *cpkt;
        int len = req->actual;

        if (req->status < 0) {
                ERROR(cdev, "rmnet_mux command error %d\n", req->status);
                return;
        }

        cpkt = rmnet_mux_alloc_ctrl_pkt(len, GFP_ATOMIC);
        if (!cpkt) {
                ERROR(cdev, "unable to allocate memory for ctrl req\n");
                return;
        }

        spin_lock(&dev->lock);
        if (!ctrl_dev->opened) {
                spin_unlock(&dev->lock);
                rmnet_mux_free_ctrl_pkt(cpkt);
                dev->cpkts_drp_cnt++;
                pr_err_ratelimited(
                        "%s: ctrl pkts dropped: cpkts_drp_cnt: %lu\n",
                        __func__, dev->cpkts_drp_cnt);
                return;
        }

        memcpy(cpkt->buf, req->buf, len);

        list_add_tail(&cpkt->list, &ctrl_dev->tx_q);
        ctrl_dev->tx_len++;
        spin_unlock(&dev->lock);

        /* wakeup read thread */
        wake_up(&ctrl_dev->tx_wait_q);
}

static int
rmnet_mux_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
        struct rmnet_mux_dev *dev = container_of(f, struct rmnet_mux_dev,
                        function);
        struct rmnet_mux_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
        struct usb_composite_dev *cdev = f->config->cdev;
        struct usb_request *req = cdev->req;
        int ret = -EOPNOTSUPP;
        u16 w_index = le16_to_cpu(ctrl->wIndex);
        u16 w_value = le16_to_cpu(ctrl->wValue);
        u16 w_length = le16_to_cpu(ctrl->wLength);
        struct rmnet_mux_ctrl_pkt *cpkt;

        if (!atomic_read(&dev->online))
                return -ENOTCONN;

        switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {

        case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
                        | USB_CDC_SEND_ENCAPSULATED_COMMAND:
                ret = w_length;
                req->complete = rmnet_mux_command_complete;
                req->context = dev;
                break;

        case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
                        | USB_CDC_GET_ENCAPSULATED_RESPONSE:
                if (w_value)
                        goto invalid;
                else {
                        unsigned len;

                        spin_lock(&dev->lock);
                        if (list_empty(&ctrl_dev->rx_q)) {
                                DBG(cdev, "ctrl resp queue empty"
                                        " %02x.%02x v%04x i%04x l%d\n",
                                        ctrl->bRequestType, ctrl->bRequest,
                                        w_value, w_index, w_length);
                                spin_unlock(&dev->lock);
                                goto invalid;
                        }
                        cpkt = list_first_entry(&ctrl_dev->rx_q,
                                        struct rmnet_mux_ctrl_pkt, list);
                        list_del(&cpkt->list);
                        ctrl_dev->rx_len--;
                        spin_unlock(&dev->lock);

                        len = min_t(unsigned, w_length, cpkt->len);
                        memcpy(req->buf, cpkt->buf, len);
                        ret = len;
                        req->complete = rmnet_mux_response_complete;
                        req->context = dev;
                        rmnet_mux_free_ctrl_pkt(cpkt);

                        dev->cpkts_tolaptop++;
                }
                break;
        case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
                        | USB_CDC_REQ_SET_CONTROL_LINE_STATE:
                /* This is a workaround for RmNet and is borrowed from the
                 * CDC/ACM standard. The host driver will issue the above ACM
                 * standard request to the RmNet interface in the following
                 * scenario: Once the network adapter is disabled from device
                 * manager, the above request will be sent from the qcusbnet
                 * host driver, with DTR being '0'. Once the network adapter
                 * is enabled from device manager (or during enumeration),
                 * the request will be sent with DTR being '1'.
                 */
                if (w_value & RMNET_MUX_ACM_CTRL_DTR)
                        ctrl_dev->cbits_to_modem |= TIOCM_DTR;
                else
                        ctrl_dev->cbits_to_modem &= ~TIOCM_DTR;

                ret = 0;

                break;
        default:

invalid:
                DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
                        ctrl->bRequestType, ctrl->bRequest,
                        w_value, w_index, w_length);
        }

        /* respond with data transfer or status phase? */
        if (ret >= 0) {
                VDBG(cdev, "rmnet_mux req%02x.%02x v%04x i%04x l%d\n",
                        ctrl->bRequestType, ctrl->bRequest,
                        w_value, w_index, w_length);
                req->zero = (ret < w_length);
                req->length = ret;
                ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
                if (ret < 0)
                        ERROR(cdev, "rmnet_mux ep0 enqueue err %d\n", ret);
        }

        return ret;
}

static void rmnet_mux_free_buf(struct rmnet_mux_dev *dev)
{
        struct rmnet_mux_sdio_dev *sdio_dev = &dev->sdio_dev;
        struct rmnet_mux_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
        struct rmnet_mux_smd_dev *smd_dev = &dev->smd_dev;
        struct rmnet_mux_ctrl_pkt *cpkt;
        struct usb_request *req;
        struct list_head *pool;
        struct sk_buff *skb;
        unsigned long flags;

        spin_lock_irqsave(&dev->lock, flags);
        /* free all usb requests in SDIO tx pool (bulk IN requests) */
        pool = &sdio_dev->tx_idle;
        while (!list_empty(pool)) {
                req = list_first_entry(pool, struct usb_request, list);
                list_del(&req->list);
                req->buf = NULL;
                rmnet_mux_free_req(dev->epin, req);
        }

        pool = &sdio_dev->rx_idle;
        /* free all usb requests in SDIO rx pool (bulk OUT requests) */
        while (!list_empty(pool)) {
                req = list_first_entry(pool, struct usb_request, list);
                list_del(&req->list);
                req->buf = NULL;
                rmnet_mux_free_req(dev->epout, req);
        }

        while ((skb = __skb_dequeue(&sdio_dev->tx_skb_queue)))
                dev_kfree_skb_any(skb);

        while ((skb = __skb_dequeue(&sdio_dev->rx_skb_queue)))
                dev_kfree_skb_any(skb);

        /* free all usb requests in SMD tx pool */
        pool = &smd_dev->tx_idle;
        while (!list_empty(pool)) {
                req = list_first_entry(pool, struct usb_request, list);
                list_del(&req->list);
                rmnet_mux_free_req(dev->epin, req);
        }

        pool = &smd_dev->rx_idle;
        /* free all usb requests in SMD rx pool */
        while (!list_empty(pool)) {
                req = list_first_entry(pool, struct usb_request, list);
                list_del(&req->list);
                rmnet_mux_free_req(dev->epout, req);
        }

        /* free all usb requests in SMD rx queue */
        pool = &smd_dev->rx_queue;
        while (!list_empty(pool)) {
                req = list_first_entry(pool, struct usb_request, list);
                list_del(&req->list);
                rmnet_mux_free_req(dev->epout, req);
        }

        pool = &ctrl_dev->tx_q;
        while (!list_empty(pool)) {
                cpkt = list_first_entry(pool, struct rmnet_mux_ctrl_pkt,
                                list);
                list_del(&cpkt->list);
                rmnet_mux_free_ctrl_pkt(cpkt);
                ctrl_dev->tx_len--;
        }

        pool = &ctrl_dev->rx_q;
        while (!list_empty(pool)) {
                cpkt = list_first_entry(pool, struct rmnet_mux_ctrl_pkt,
                                list);
                list_del(&cpkt->list);
                rmnet_mux_free_ctrl_pkt(cpkt);
                ctrl_dev->rx_len--;
        }
        spin_unlock_irqrestore(&dev->lock, flags);
}

static void rmnet_mux_disconnect_work(struct work_struct *w)
{
        struct rmnet_mux_dev *dev = container_of(w, struct rmnet_mux_dev,
                        disconnect_work);
        struct rmnet_mux_smd_dev *smd_dev = &dev->smd_dev;
        struct rmnet_mux_ctrl_dev *ctrl_dev = &dev->ctrl_dev;

        if (dev->xport == USB_GADGET_XPORT_SMD) {
                tasklet_kill(&smd_dev->smd_data.rx_tlet);
                tasklet_kill(&smd_dev->smd_data.tx_tlet);
        }

        rmnet_mux_free_buf(dev);
        dev->xport = USB_GADGET_XPORT_UNDEF;

        /* wakeup read thread */
        wake_up(&ctrl_dev->tx_wait_q);
}

static void rmnet_mux_suspend(struct usb_function *f)
{
        struct rmnet_mux_dev *dev = container_of(f, struct rmnet_mux_dev,
                        function);
        struct rmnet_mux_ctrl_dev *ctrl_dev = &dev->ctrl_dev;

        if (!atomic_read(&dev->online))
                return;
        /* This is a workaround for a Windows host bug during suspend.
         * Windows 7/XP hosts are supposed to drop DTR when the host is
         * suspended. Since that is not being done, drop DTR explicitly
         * in the function driver's suspend handler.
         */
        ctrl_dev->cbits_to_modem &= ~TIOCM_DTR;
}

static void rmnet_mux_disable(struct usb_function *f)
{
        struct rmnet_mux_dev *dev = container_of(f, struct rmnet_mux_dev,
                        function);
        struct rmnet_mux_ctrl_dev *ctrl_dev = &dev->ctrl_dev;

        if (!atomic_read(&dev->online))
                return;

        atomic_set(&dev->online, 0);

        usb_ep_fifo_flush(dev->epnotify);
        usb_ep_disable(dev->epnotify);
        rmnet_mux_free_req(dev->epnotify, dev->notify_req);

        usb_ep_fifo_flush(dev->epout);
        usb_ep_disable(dev->epout);

        usb_ep_fifo_flush(dev->epin);
        usb_ep_disable(dev->epin);

        /* cleanup work */
        ctrl_dev->cbits_to_modem = 0;
        queue_work(dev->wq, &dev->disconnect_work);
}

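/*
 * The SDIO data channel can come up long after the gadget does, so the
 * open is retried from a delayed work item, up to SDIO_OPEN_MAX_RETRY
 * attempts spaced SDIO_OPEN_RETRY_DELAY apart, before giving up.
 */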
#define SDIO_OPEN_RETRY_DELAY	msecs_to_jiffies(2000)
#define SDIO_OPEN_MAX_RETRY	90
static void rmnet_mux_open_sdio_work(struct work_struct *w)
{
        struct rmnet_mux_dev *dev =
                container_of(w, struct rmnet_mux_dev, sdio_dev.open_work.work);
        struct rmnet_mux_sdio_dev *sdio_dev = &dev->sdio_dev;
        struct usb_composite_dev *cdev = dev->cdev;
        int ret;
        static int retry_cnt;

        /* Data channel for network packets */
        ret = msm_sdio_dmux_open(rmnet_mux_sdio_data_ch, dev,
                                rmnet_mux_sdio_data_receive_cb,
                                rmnet_mux_sdio_data_write_done);
        if (ret) {
                if (retry_cnt > SDIO_OPEN_MAX_RETRY) {
                        ERROR(cdev, "Unable to open SDIO DATA channel\n");
                        return;
                }
                retry_cnt++;
                queue_delayed_work(dev->wq, &sdio_dev->open_work,
                                SDIO_OPEN_RETRY_DELAY);
                return;
        }

        atomic_set(&sdio_dev->sdio_open, 1);
        pr_info("%s: usb rmnet_mux sdio channels are open retry_cnt:%d\n",
                        __func__, retry_cnt);
        retry_cnt = 0;
        return;
}

static int rmnet_mux_set_alt(struct usb_function *f,
                unsigned intf, unsigned alt)
{
        struct rmnet_mux_dev *dev = container_of(f, struct rmnet_mux_dev,
                        function);
        struct rmnet_mux_sdio_dev *sdio_dev = &dev->sdio_dev;
        struct usb_composite_dev *cdev = dev->cdev;
        int ret = 0;

        /* allocate notification */
        dev->notify_req = rmnet_mux_alloc_req(dev->epnotify,
                        RMNET_MUX_SDIO_MAX_NFY_SZE, GFP_ATOMIC);

        if (IS_ERR(dev->notify_req))
                return PTR_ERR(dev->notify_req);

        dev->notify_req->complete = rmnet_mux_notify_complete;
        dev->notify_req->context = dev;
        dev->notify_req->length = RMNET_MUX_SDIO_MAX_NFY_SZE;

        /* Enable epin */
        dev->epin->driver_data = dev;
        ret = config_ep_by_speed(cdev->gadget, f, dev->epin);
        if (ret) {
                dev->epin->desc = NULL;
                ERROR(cdev, "config_ep_by_speed fails for ep %s, result %d\n",
                                dev->epin->name, ret);
                return ret;
        }
        ret = usb_ep_enable(dev->epin);
        if (ret) {
                ERROR(cdev, "can't enable %s, result %d\n",
                                dev->epin->name, ret);
                return ret;
        }

        /* Enable epout */
        dev->epout->driver_data = dev;
        ret = config_ep_by_speed(cdev->gadget, f, dev->epout);
        if (ret) {
                dev->epout->desc = NULL;
                ERROR(cdev, "config_ep_by_speed fails for ep %s, result %d\n",
                                dev->epout->name, ret);
                usb_ep_disable(dev->epin);
                return ret;
        }
        ret = usb_ep_enable(dev->epout);
        if (ret) {
                ERROR(cdev, "can't enable %s, result %d\n",
                                dev->epout->name, ret);
                usb_ep_disable(dev->epin);
                return ret;
        }

        /* Enable epnotify */
        ret = config_ep_by_speed(cdev->gadget, f, dev->epnotify);
        if (ret) {
                dev->epnotify->desc = NULL;
                ERROR(cdev, "config_ep_by_speed fails for ep %s, result %d\n",
                                dev->epnotify->name, ret);
                usb_ep_disable(dev->epin);
                usb_ep_disable(dev->epout);
                return ret;
        }
        ret = usb_ep_enable(dev->epnotify);
        if (ret) {
                ERROR(cdev, "can't enable %s, result %d\n",
                                dev->epnotify->name, ret);
                usb_ep_disable(dev->epin);
                usb_ep_disable(dev->epout);
                return ret;
        }

        dev->dpkts_tolaptop = 0;
        dev->cpkts_tolaptop = 0;
        dev->cpkts_tomdm = 0;
        dev->dpkts_tomdm = 0;
        dev->dpkts_tomsm = 0;
        dev->tx_drp_cnt = 0;
        dev->cpkts_drp_cnt = 0;
        sdio_dev->dpkts_pending_atdmux = 0;
        atomic_set(&dev->online, 1);

        return 0;
}

static ssize_t transport_store(
		struct device *device, struct device_attribute *attr,
		const char *buf, size_t size)
{
	struct rmnet_mux_dev *dev = rmux_dev;
	int value;
	enum transport_type given_xport;
	enum transport_type t;
	struct rmnet_mux_smd_dev *smd_dev = &dev->smd_dev;
	struct rmnet_mux_sdio_dev *sdio_dev = &dev->sdio_dev;
	struct list_head *pool;
	struct sk_buff_head *skb_pool;
	struct sk_buff *skb;
	struct usb_request *req;
	unsigned long flags;

	if (!atomic_read(&dev->online)) {
		pr_err("%s: usb cable is not connected\n", __func__);
		return -EINVAL;
	}

	if (sscanf(buf, "%d", &value) != 1)
		return -EINVAL;
	if (value)
		given_xport = USB_GADGET_XPORT_SDIO;
	else
		given_xport = USB_GADGET_XPORT_SMD;

	if (given_xport == dev->xport) {
		pr_err("%s: given_xport:%s cur_xport:%s doing nothing\n",
		       __func__, xport_to_str(given_xport),
		       xport_to_str(dev->xport));
		return size;
	}

	pr_debug("usb_rmnet_mux: TransportRequested: %s\n",
		 xport_to_str(given_xport));

	/* prevent any other pkts to/from usb */
	t = dev->xport;
	dev->xport = USB_GADGET_XPORT_UNDEF;
	if (t != USB_GADGET_XPORT_UNDEF) {
		usb_ep_fifo_flush(dev->epin);
		usb_ep_fifo_flush(dev->epout);
	}

	switch (t) {
	case USB_GADGET_XPORT_SDIO:
		spin_lock_irqsave(&dev->lock, flags);
		sdio_dev->dpkts_pending_atdmux = 0;

		/* free all usb requests in SDIO tx pool */
		pool = &sdio_dev->tx_idle;
		while (!list_empty(pool)) {
			req = list_first_entry(pool, struct usb_request, list);
			list_del(&req->list);
			req->buf = NULL;
			rmnet_mux_free_req(dev->epout, req);
		}

		/* free all usb requests in SDIO rx pool */
		pool = &sdio_dev->rx_idle;
		while (!list_empty(pool)) {
			req = list_first_entry(pool, struct usb_request, list);
			list_del(&req->list);
			req->buf = NULL;
			rmnet_mux_free_req(dev->epin, req);
		}

		/* drop any skbs still queued for tx or rx */
		skb_pool = &sdio_dev->tx_skb_queue;
		while ((skb = __skb_dequeue(skb_pool)))
			dev_kfree_skb_any(skb);
		skb_pool = &sdio_dev->rx_skb_queue;
		while ((skb = __skb_dequeue(skb_pool)))
			dev_kfree_skb_any(skb);

		spin_unlock_irqrestore(&dev->lock, flags);
		break;
	case USB_GADGET_XPORT_SMD:
		/* close smd xport */
		tasklet_kill(&smd_dev->smd_data.rx_tlet);
		tasklet_kill(&smd_dev->smd_data.tx_tlet);

		spin_lock_irqsave(&dev->lock, flags);
		/* free all usb requests in SMD tx pool */
		pool = &smd_dev->tx_idle;
		while (!list_empty(pool)) {
			req = list_first_entry(pool, struct usb_request, list);
			list_del(&req->list);
			rmnet_mux_free_req(dev->epout, req);
		}

		/* free all usb requests in SMD rx pool */
		pool = &smd_dev->rx_idle;
		while (!list_empty(pool)) {
			req = list_first_entry(pool, struct usb_request, list);
			list_del(&req->list);
			rmnet_mux_free_req(dev->epin, req);
		}

		/* free all usb requests in SMD rx queue */
		pool = &smd_dev->rx_queue;
		while (!list_empty(pool)) {
			req = list_first_entry(pool, struct usb_request, list);
			list_del(&req->list);
			rmnet_mux_free_req(dev->epin, req);
		}

		spin_unlock_irqrestore(&dev->lock, flags);
		break;
	default:
		pr_debug("%s: undefined xport, do nothing\n", __func__);
	}

	dev->xport = given_xport;

	switch (dev->xport) {
	case USB_GADGET_XPORT_SDIO:
		rmnet_mux_sdio_enable(dev);
		break;
	case USB_GADGET_XPORT_SMD:
		rmnet_mux_smd_enable(dev);
		break;
	default:
		/* we should never come here */
		pr_err("%s: undefined transport\n", __func__);
	}

	return size;
}

static DEVICE_ATTR(transport, S_IRUGO | S_IWUSR, NULL, transport_store);
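
/*
 * The "transport" attribute above is a writable sysfs file on the gadget
 * device. The exact sysfs path depends on how the android gadget registers
 * this attribute, so the path below is only illustrative:
 *
 *   echo 1 > /sys/devices/.../transport    # switch data path to SDIO
 *   echo 0 > /sys/devices/.../transport    # switch data path back to SMD
 *
 * The switch is honoured only while the cable is connected and the
 * requested transport differs from the current one.
 */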

static int rmnet_mux_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct rmnet_mux_dev *dev = container_of(f, struct rmnet_mux_dev,
						 function);
	struct rmnet_mux_sdio_dev *sdio_dev = &dev->sdio_dev;
	int id;
	struct usb_ep *ep;

	dev->cdev = cdev;

	/* allocate interface ID */
	id = usb_interface_id(c, f);
	if (id < 0)
		return id;
	dev->ifc_id = id;
	rmnet_mux_interface_desc.bInterfaceNumber = id;

	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_mux_fs_in_desc);
	if (!ep)
		goto out;
	ep->driver_data = cdev;	/* claim endpoint */
	dev->epin = ep;

	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_mux_fs_out_desc);
	if (!ep)
		goto out;
	ep->driver_data = cdev;	/* claim endpoint */
	dev->epout = ep;

	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_mux_fs_notify_desc);
	if (!ep)
		goto out;
	ep->driver_data = cdev;	/* claim endpoint */
	dev->epnotify = ep;

	/* support all relevant hardware speeds... we expect that when
	 * hardware is dual speed, all bulk-capable endpoints work at
	 * both speeds
	 */
	if (gadget_is_dualspeed(c->cdev->gadget)) {
		rmnet_mux_hs_in_desc.bEndpointAddress =
			rmnet_mux_fs_in_desc.bEndpointAddress;
		rmnet_mux_hs_out_desc.bEndpointAddress =
			rmnet_mux_fs_out_desc.bEndpointAddress;
		rmnet_mux_hs_notify_desc.bEndpointAddress =
			rmnet_mux_fs_notify_desc.bEndpointAddress;
	}

	queue_delayed_work(dev->wq, &sdio_dev->open_work, 0);

	return 0;

out:
	if (dev->epnotify)
		dev->epnotify->driver_data = NULL;
	if (dev->epout)
		dev->epout->driver_data = NULL;
	if (dev->epin)
		dev->epin->driver_data = NULL;

	return -ENODEV;
}

static void rmnet_mux_smd_init(struct rmnet_mux_smd_dev *smd_dev)
{
	struct rmnet_mux_dev *dev = container_of(smd_dev,
					struct rmnet_mux_dev, smd_dev);

	atomic_set(&smd_dev->smd_data.rx_pkt, 0);
	tasklet_init(&smd_dev->smd_data.rx_tlet, rmnet_mux_smd_data_rx_tlet,
		     (unsigned long) dev);
	tasklet_init(&smd_dev->smd_data.tx_tlet, rmnet_mux_smd_data_tx_tlet,
		     (unsigned long) dev);

	init_waitqueue_head(&smd_dev->smd_data.wait);

	INIT_LIST_HEAD(&smd_dev->rx_idle);
	INIT_LIST_HEAD(&smd_dev->rx_queue);
	INIT_LIST_HEAD(&smd_dev->tx_idle);
}

static void rmnet_mux_sdio_init(struct rmnet_mux_sdio_dev *sdio_dev)
{
	INIT_WORK(&sdio_dev->data_rx_work, rmnet_mux_sdio_data_rx_work);

	INIT_DELAYED_WORK(&sdio_dev->open_work, rmnet_mux_open_sdio_work);

	INIT_LIST_HEAD(&sdio_dev->rx_idle);
	INIT_LIST_HEAD(&sdio_dev->tx_idle);
	skb_queue_head_init(&sdio_dev->tx_skb_queue);
	skb_queue_head_init(&sdio_dev->rx_skb_queue);
}

static void
rmnet_mux_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct rmnet_mux_dev *dev = container_of(f, struct rmnet_mux_dev,
						 function);
	struct rmnet_mux_smd_dev *smd_dev = &dev->smd_dev;

	smd_dev->smd_data.flags = 0;
}

#if defined(CONFIG_DEBUG_FS)
#define DEBUG_BUF_SIZE	1024
static ssize_t rmnet_mux_read_stats(struct file *file, char __user *ubuf,
				    size_t count, loff_t *ppos)
{
	struct rmnet_mux_dev *dev = file->private_data;
	struct rmnet_mux_sdio_dev *sdio_dev = &dev->sdio_dev;
	struct rmnet_mux_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
	char *debug_buf;
	unsigned long flags;
	int ret;

	debug_buf = kmalloc(DEBUG_BUF_SIZE, GFP_KERNEL);
	if (!debug_buf)
		return -ENOMEM;

	spin_lock_irqsave(&dev->lock, flags);
	ret = scnprintf(debug_buf, DEBUG_BUF_SIZE,
			"dpkts_tomsm: %lu\n"
			"dpkts_tomdm: %lu\n"
			"cpkts_tomdm: %lu\n"
			"dpkts_tolaptop: %lu\n"
			"cpkts_tolaptop: %lu\n"
			"cbits_to_modem: %lu\n"
			"tx_skb_qlen: %u\n"
			"rx_skb_qlen: %u\n"
			"dpkts_pending_at_dmux: %u\n"
			"tx_drp_cnt: %lu\n"
			"cpkts_drp_cnt: %lu\n"
			"cpkt_tx_qlen: %lu\n"
			"cpkt_rx_qlen_to_modem: %lu\n"
			"xport: %s\n"
			"ctrl_ch_opened: %d\n",
			dev->dpkts_tomsm, dev->dpkts_tomdm,
			dev->cpkts_tomdm, dev->dpkts_tolaptop,
			dev->cpkts_tolaptop, ctrl_dev->cbits_to_modem,
			sdio_dev->tx_skb_queue.qlen,
			sdio_dev->rx_skb_queue.qlen,
			sdio_dev->dpkts_pending_atdmux, dev->tx_drp_cnt,
			dev->cpkts_drp_cnt,
			ctrl_dev->tx_len, ctrl_dev->rx_len,
			xport_to_str(dev->xport), ctrl_dev->opened);

	spin_unlock_irqrestore(&dev->lock, flags);

	ret = simple_read_from_buffer(ubuf, count, ppos, debug_buf, ret);

	kfree(debug_buf);

	return ret;
}

static ssize_t rmnet_mux_reset_stats(struct file *file, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct rmnet_mux_dev *dev = file->private_data;
	struct rmnet_mux_sdio_dev *sdio_dev = &dev->sdio_dev;

	dev->dpkts_tolaptop = 0;
	dev->cpkts_tolaptop = 0;
	dev->cpkts_tomdm = 0;
	dev->dpkts_tomdm = 0;
	dev->dpkts_tomsm = 0;
	sdio_dev->dpkts_pending_atdmux = 0;
	dev->tx_drp_cnt = 0;
	dev->cpkts_drp_cnt = 0;

	return count;
}

static int dbg_rmnet_mux_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;

	return 0;
}

const struct file_operations rmnet_mux_svlte_debug_stats_ops = {
	.open = dbg_rmnet_mux_open,
	.read = rmnet_mux_read_stats,
	.write = rmnet_mux_reset_stats,
};

struct dentry *dent_rmnet_mux;

static void rmnet_mux_debugfs_init(struct rmnet_mux_dev *dev)
{
	dent_rmnet_mux = debugfs_create_dir("usb_rmnet_mux", NULL);
	if (IS_ERR(dent_rmnet_mux))
		return;

	debugfs_create_file("status", 0444, dent_rmnet_mux, dev,
			    &rmnet_mux_svlte_debug_stats_ops);
}
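
/*
 * With debugfs mounted in the usual place, the counters can be inspected
 * and, as root, cleared through the "status" file created above (the file
 * mode is 0444, so the write path relies on root's permission override):
 *
 *   cat /sys/kernel/debug/usb_rmnet_mux/status
 *   echo 1 > /sys/kernel/debug/usb_rmnet_mux/status    # reset counters
 */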
#else
static void rmnet_mux_debugfs_init(struct rmnet_mux_dev *dev) {}
#endif

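/*
 * /dev/rmnet_mux_ctrl (the misc device registered further below) is the
 * userspace end of the control channel: read() blocks until a control
 * packet from the host is available, write() queues a response that is
 * then offered to the host through a response-available notification,
 * and the ioctl reports the control bits (DTR) last set by the host.
 */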
int usb_rmnet_mux_ctrl_open(struct inode *inode, struct file *fp)
{
	struct rmnet_mux_dev *dev = rmux_dev;
	struct rmnet_mux_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (ctrl_dev->opened) {
		spin_unlock_irqrestore(&dev->lock, flags);
		pr_err("%s: device is already opened\n", __func__);
		return -EBUSY;
	}

	ctrl_dev->opened = 1;
	fp->private_data = dev;
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

int usb_rmnet_mux_ctrl_release(struct inode *inode, struct file *fp)
{
	struct rmnet_mux_dev *dev = fp->private_data;
	struct rmnet_mux_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	ctrl_dev->opened = 0;
	fp->private_data = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

ssize_t usb_rmnet_mux_ctrl_read(struct file *fp,
				char __user *buf,
				size_t count,
				loff_t *ppos)
{
	struct rmnet_mux_dev *dev = fp->private_data;
	struct rmnet_mux_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
	struct rmnet_mux_ctrl_pkt *cpkt;
	unsigned long flags;
	int ret = 0;

ctrl_read:
	if (!atomic_read(&dev->online)) {
		pr_debug("%s: USB cable not connected\n", __func__);
		return -ENODEV;
	}

	spin_lock_irqsave(&dev->lock, flags);
	if (list_empty(&ctrl_dev->tx_q)) {
		spin_unlock_irqrestore(&dev->lock, flags);
		/* sleep until a control packet or a disconnect arrives */
		ret = wait_event_interruptible(ctrl_dev->tx_wait_q,
					       !list_empty(&ctrl_dev->tx_q) ||
					       !atomic_read(&dev->online));
		if (ret < 0)
			return ret;

		goto ctrl_read;
	}

	cpkt = list_first_entry(&ctrl_dev->tx_q, struct rmnet_mux_ctrl_pkt,
				list);
	if (cpkt->len > count) {
		spin_unlock_irqrestore(&dev->lock, flags);
		pr_err("%s: cpkt size:%d > buf size:%zu\n",
		       __func__, cpkt->len, count);
		return -ENOMEM;
	}
	list_del(&cpkt->list);
	ctrl_dev->tx_len--;
	spin_unlock_irqrestore(&dev->lock, flags);

	count = cpkt->len;

	ret = copy_to_user(buf, cpkt->buf, count);
	dev->cpkts_tomdm++;

	rmnet_mux_free_ctrl_pkt(cpkt);

	if (ret)
		return -EFAULT;

	return count;
}

ssize_t usb_rmnet_mux_ctrl_write(struct file *fp,
				 const char __user *buf,
				 size_t count,
				 loff_t *ppos)
{
	struct rmnet_mux_dev *dev = fp->private_data;
	struct rmnet_mux_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
	struct rmnet_mux_ctrl_pkt *cpkt;
	unsigned long flags;
	int ret = 0;

	if (!atomic_read(&dev->online)) {
		pr_debug("%s: USB cable not connected\n", __func__);
		return -ENODEV;
	}

	if (!count) {
		pr_err("%s: zero length ctrl pkt\n", __func__);
		return -EINVAL;
	}

	if (count > MAX_CTRL_PKT_SIZE) {
		pr_err("%s: max_pkt_size:%d given_pkt_size:%zu\n",
		       __func__, MAX_CTRL_PKT_SIZE, count);
		return -ENOMEM;
	}

	cpkt = rmnet_mux_alloc_ctrl_pkt(count, GFP_KERNEL);
	if (!cpkt) {
		pr_err("%s: cannot allocate rmnet_mux ctrl pkt\n", __func__);
		return -ENOMEM;
	}

	ret = copy_from_user(cpkt->buf, buf, count);
	if (ret) {
		pr_err("%s: copy_from_user failed err:%d\n",
		       __func__, ret);
		rmnet_mux_free_ctrl_pkt(cpkt);
		return -EFAULT;
	}

	spin_lock_irqsave(&dev->lock, flags);
	ctrl_dev->rx_len++;
	list_add(&cpkt->list, &ctrl_dev->rx_q);
	spin_unlock_irqrestore(&dev->lock, flags);

	ctrl_response_available(dev);

	return count;
}

#define RMNET_MUX_CTRL_GET_DTR	_IOR(0xFE, 0, int)
static long
usb_rmnet_mux_ctrl_ioctl(struct file *fp, unsigned c, unsigned long value)
{
	struct rmnet_mux_dev *dev = fp->private_data;
	struct rmnet_mux_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
	unsigned long *temp = (unsigned long *)value;
	int ret = 0;

	if (c != RMNET_MUX_CTRL_GET_DTR)
		return -ENOTTY;

	ret = copy_to_user(temp,
			   &ctrl_dev->cbits_to_modem,
			   sizeof(*temp));
	if (ret)
		return -EFAULT;

	return 0;
}
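
/*
 * A minimal userspace sketch of the DTR query, assuming cbits_to_modem
 * holds the raw CDC control-line bits set by the host (error handling
 * omitted; the node name comes from the miscdevice below):
 *
 *   unsigned long cbits;
 *   int fd = open("/dev/rmnet_mux_ctrl", O_RDWR);
 *
 *   if (fd >= 0 && ioctl(fd, RMNET_MUX_CTRL_GET_DTR, &cbits) == 0)
 *       printf("DTR %s\n",
 *              (cbits & RMNET_MUX_ACM_CTRL_DTR) ? "set" : "clear");
 */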

static const struct file_operations rmnet_mux_ctrl_fops = {
	.owner = THIS_MODULE,
	.open = usb_rmnet_mux_ctrl_open,
	.release = usb_rmnet_mux_ctrl_release,
	.read = usb_rmnet_mux_ctrl_read,
	.write = usb_rmnet_mux_ctrl_write,
	.unlocked_ioctl = usb_rmnet_mux_ctrl_ioctl,
};

static struct miscdevice rmnet_mux_ctrl_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "rmnet_mux_ctrl",
	.fops = &rmnet_mux_ctrl_fops,
};

static int rmnet_mux_ctrl_device_init(struct rmnet_mux_dev *dev)
{
	int ret;
	struct rmnet_mux_ctrl_dev *ctrl_dev = &dev->ctrl_dev;

	INIT_LIST_HEAD(&ctrl_dev->tx_q);
	INIT_LIST_HEAD(&ctrl_dev->rx_q);
	init_waitqueue_head(&ctrl_dev->tx_wait_q);

	ret = misc_register(&rmnet_mux_ctrl_dev);
	if (ret) {
		pr_err("%s: failed to register misc device\n", __func__);
		return ret;
	}

	return 0;
}

static int rmnet_smd_sdio_function_add(struct usb_configuration *c)
{
	struct rmnet_mux_dev *dev = rmux_dev;

	if (!dev)
		return -ENODEV;

	pr_debug("rmnet_smd_sdio_function_add\n");

	dev->function.name = "rmnet_smd_sdio";
	dev->function.strings = rmnet_mux_strings;
	dev->function.descriptors = rmnet_mux_fs_function;
	dev->function.hs_descriptors = rmnet_mux_hs_function;
	dev->function.bind = rmnet_mux_bind;
	dev->function.unbind = rmnet_mux_unbind;
	dev->function.setup = rmnet_mux_setup;
	dev->function.set_alt = rmnet_mux_set_alt;
	dev->function.disable = rmnet_mux_disable;
	dev->function.suspend = rmnet_mux_suspend;

	return usb_add_function(c, &dev->function);
}
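
/*
 * rmnet_smd_sdio_init() below must run before the composite driver calls
 * rmnet_smd_sdio_function_add(); otherwise rmux_dev is still NULL and the
 * add fails with -ENODEV.
 */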

static int rmnet_smd_sdio_init(void)
{
	struct rmnet_mux_dev *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	rmux_dev = dev;

	dev->wq = create_singlethread_workqueue("k_rmnet_mux_work");
	if (!dev->wq) {
		ret = -ENOMEM;
		goto free_dev;
	}

	spin_lock_init(&dev->lock);
	atomic_set(&dev->notify_count, 0);
	atomic_set(&dev->online, 0);
	INIT_WORK(&dev->disconnect_work, rmnet_mux_disconnect_work);
	rmnet_mux_smd_init(&dev->smd_dev);
	rmnet_mux_sdio_init(&dev->sdio_dev);

	ret = rmnet_mux_ctrl_device_init(dev);
	if (ret) {
		pr_debug("%s: rmnet_mux_ctrl_device_init failed, err:%d\n",
			 __func__, ret);
		goto free_wq;
	}

	rmnet_mux_debugfs_init(dev);

	return 0;

free_wq:
	destroy_workqueue(dev->wq);
free_dev:
	kfree(dev);

	return ret;
}

static void rmnet_smd_sdio_cleanup(void)
{
	struct rmnet_mux_dev *dev = rmux_dev;
	struct rmnet_mux_smd_dev *smd_dev = &dev->smd_dev;

	debugfs_remove_recursive(dent_rmnet_mux);
	misc_deregister(&rmnet_mux_ctrl_dev);
	smd_close(smd_dev->smd_data.ch);
	destroy_workqueue(dev->wq);
	kfree(dev);
}