blob: 4a455b6d8ce8b43936713649070c25ce5895cf22 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/*
Duy Truong790f06d2013-02-13 16:38:12 -08002 * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/slab.h>
15#include <linux/kernel.h>
16#include <linux/device.h>
17#include <linux/usb/android_composite.h>
18#include <linux/spinlock.h>
19
Hemant Kumar1b820d52011-11-03 15:08:28 -070020#include <mach/usb_gadget_xport.h>
Ofir Cohenfdecb602012-11-16 15:50:01 +020021#include <mach/usb_bam.h>
Ofir Cohena1c2a872011-12-14 10:26:34 +020022
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070023#include "u_rmnet.h"
24#include "gadget_chips.h"
25
/* Exponent used to derive the interrupt endpoint polling interval. */
#define RMNET_NOTIFY_INTERVAL 5
/* Largest notification sent on the interrupt ep: one CDC notification hdr. */
#define RMNET_MAX_NOTIFY_SIZE sizeof(struct usb_cdc_notification)


/* DTR bit in the CDC SET_CONTROL_LINE_STATE bitmap. */
#define ACM_CTRL_DTR (1 << 0)

/* TODO: use separate structures for data and
 * control paths
 */
/* Per-port state for one rmnet USB function instance. */
struct f_rmnet {
        struct grmnet           port;           /* ctrl/data hooks shared with transport layer */
        int                     ifc_id;         /* USB interface number (used in notify wIndex) */
        u8                      port_num;       /* index into rmnet_ports[] */
        atomic_t                online;         /* set in set_alt(), cleared in disable() */
        atomic_t                ctrl_online;    /* control transport connected */
        struct usb_composite_dev *cdev;

        spinlock_t              lock;           /* guards cpkt_resp_q */

        /* usb eps*/
        struct usb_ep           *notify;        /* interrupt IN endpoint */
        struct usb_request      *notify_req;    /* single reusable notify request */

        /* control info */
        struct list_head        cpkt_resp_q;    /* ctrl responses queued for the host */
        atomic_t                notify_count;   /* notifications pending on the wire */
        unsigned long           cpkts_len;
};
54
#define NR_RMNET_PORTS  3

/* Number of rmnet ports actually configured, and per-transport counts. */
static unsigned int nr_rmnet_ports;
static unsigned int no_ctrl_smd_ports;
static unsigned int no_ctrl_hsic_ports;
static unsigned int no_ctrl_hsuart_ports;
static unsigned int no_data_bam_ports;
static unsigned int no_data_bam2bam_ports;
static unsigned int no_data_hsic_ports;
static unsigned int no_data_hsuart_ports;

/* Per-port transport selection and back-pointer to the function state. */
static struct rmnet_ports {
        enum transport_type     data_xport;     /* transport carrying network data */
        enum transport_type     ctrl_xport;     /* transport carrying control msgs */
        unsigned                data_xport_num; /* index within the data transport */
        unsigned                ctrl_xport_num; /* index within the ctrl transport */
        unsigned                port_num;       /* rmnet port index */
        struct f_rmnet          *port;
} rmnet_ports[NR_RMNET_PORTS];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070072
/* Vendor-specific interface with three endpoints: notify IN + bulk IN/OUT. */
static struct usb_interface_descriptor rmnet_interface_desc = {
        .bLength =              USB_DT_INTERFACE_SIZE,
        .bDescriptorType =      USB_DT_INTERFACE,
        .bNumEndpoints =        3,
        .bInterfaceClass =      USB_CLASS_VENDOR_SPEC,
        .bInterfaceSubClass =   USB_CLASS_VENDOR_SPEC,
        .bInterfaceProtocol =   USB_CLASS_VENDOR_SPEC,
        /* .iInterface = DYNAMIC */
};
82
/* Full speed support */
static struct usb_endpoint_descriptor rmnet_fs_notify_desc = {
        .bLength =              USB_DT_ENDPOINT_SIZE,
        .bDescriptorType =      USB_DT_ENDPOINT,
        .bEndpointAddress =     USB_DIR_IN,
        .bmAttributes =         USB_ENDPOINT_XFER_INT,
        .wMaxPacketSize =       __constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
        /* FS interrupt interval is in frames (ms): 1 << 5 = 32 ms */
        .bInterval =            1 << RMNET_NOTIFY_INTERVAL,
};

static struct usb_endpoint_descriptor rmnet_fs_in_desc = {
        .bLength =              USB_DT_ENDPOINT_SIZE,
        .bDescriptorType =      USB_DT_ENDPOINT,
        .bEndpointAddress =     USB_DIR_IN,
        .bmAttributes =         USB_ENDPOINT_XFER_BULK,
        .wMaxPacketSize =       __constant_cpu_to_le16(64),
};

static struct usb_endpoint_descriptor rmnet_fs_out_desc = {
        .bLength =              USB_DT_ENDPOINT_SIZE,
        .bDescriptorType =      USB_DT_ENDPOINT,
        .bEndpointAddress =     USB_DIR_OUT,
        .bmAttributes =         USB_ENDPOINT_XFER_BULK,
        .wMaxPacketSize =       __constant_cpu_to_le16(64),
};

/* Descriptor list reported for a full-speed connection. */
static struct usb_descriptor_header *rmnet_fs_function[] = {
        (struct usb_descriptor_header *) &rmnet_interface_desc,
        (struct usb_descriptor_header *) &rmnet_fs_notify_desc,
        (struct usb_descriptor_header *) &rmnet_fs_in_desc,
        (struct usb_descriptor_header *) &rmnet_fs_out_desc,
        NULL,
};
116
/* High speed support */
static struct usb_endpoint_descriptor rmnet_hs_notify_desc = {
        .bLength =              USB_DT_ENDPOINT_SIZE,
        .bDescriptorType =      USB_DT_ENDPOINT,
        .bEndpointAddress =     USB_DIR_IN,
        .bmAttributes =         USB_ENDPOINT_XFER_INT,
        .wMaxPacketSize =       __constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
        /* HS interval is an exponent: 2^(9-1) microframes = 32 ms */
        .bInterval =            RMNET_NOTIFY_INTERVAL + 4,
};

static struct usb_endpoint_descriptor rmnet_hs_in_desc = {
        .bLength =              USB_DT_ENDPOINT_SIZE,
        .bDescriptorType =      USB_DT_ENDPOINT,
        .bEndpointAddress =     USB_DIR_IN,
        .bmAttributes =         USB_ENDPOINT_XFER_BULK,
        .wMaxPacketSize =       __constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor rmnet_hs_out_desc = {
        .bLength =              USB_DT_ENDPOINT_SIZE,
        .bDescriptorType =      USB_DT_ENDPOINT,
        .bEndpointAddress =     USB_DIR_OUT,
        .bmAttributes =         USB_ENDPOINT_XFER_BULK,
        .wMaxPacketSize =       __constant_cpu_to_le16(512),
};

/* Descriptor list reported for a high-speed connection. */
static struct usb_descriptor_header *rmnet_hs_function[] = {
        (struct usb_descriptor_header *) &rmnet_interface_desc,
        (struct usb_descriptor_header *) &rmnet_hs_notify_desc,
        (struct usb_descriptor_header *) &rmnet_hs_in_desc,
        (struct usb_descriptor_header *) &rmnet_hs_out_desc,
        NULL,
};
150
/* Super speed support */
static struct usb_endpoint_descriptor rmnet_ss_notify_desc = {
        .bLength =              USB_DT_ENDPOINT_SIZE,
        .bDescriptorType =      USB_DT_ENDPOINT,
        .bEndpointAddress =     USB_DIR_IN,
        .bmAttributes =         USB_ENDPOINT_XFER_INT,
        .wMaxPacketSize =       __constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
        .bInterval =            RMNET_NOTIFY_INTERVAL + 4,
};

/* SS companion for the notify endpoint. */
static struct usb_ss_ep_comp_descriptor rmnet_ss_notify_comp_desc = {
        .bLength =              sizeof rmnet_ss_notify_comp_desc,
        .bDescriptorType =      USB_DT_SS_ENDPOINT_COMP,

        /* the following 3 values can be tweaked if necessary */
        /* .bMaxBurst =         0, */
        /* .bmAttributes =      0, */
        .wBytesPerInterval =    cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
};

static struct usb_endpoint_descriptor rmnet_ss_in_desc = {
        .bLength =              USB_DT_ENDPOINT_SIZE,
        .bDescriptorType =      USB_DT_ENDPOINT,
        .bEndpointAddress =     USB_DIR_IN,
        .bmAttributes =         USB_ENDPOINT_XFER_BULK,
        .wMaxPacketSize =       __constant_cpu_to_le16(1024),
};

/* SS companion for the bulk IN endpoint. */
static struct usb_ss_ep_comp_descriptor rmnet_ss_in_comp_desc = {
        .bLength =              sizeof rmnet_ss_in_comp_desc,
        .bDescriptorType =      USB_DT_SS_ENDPOINT_COMP,

        /* the following 2 values can be tweaked if necessary */
        /* .bMaxBurst =         0, */
        /* .bmAttributes =      0, */
};

static struct usb_endpoint_descriptor rmnet_ss_out_desc = {
        .bLength =              USB_DT_ENDPOINT_SIZE,
        .bDescriptorType =      USB_DT_ENDPOINT,
        .bEndpointAddress =     USB_DIR_OUT,
        .bmAttributes =         USB_ENDPOINT_XFER_BULK,
        .wMaxPacketSize =       __constant_cpu_to_le16(1024),
};

/* SS companion for the bulk OUT endpoint. */
static struct usb_ss_ep_comp_descriptor rmnet_ss_out_comp_desc = {
        .bLength =              sizeof rmnet_ss_out_comp_desc,
        .bDescriptorType =      USB_DT_SS_ENDPOINT_COMP,

        /* the following 2 values can be tweaked if necessary */
        /* .bMaxBurst =         0, */
        /* .bmAttributes =      0, */
};

/* Descriptor list reported for a super-speed connection. */
static struct usb_descriptor_header *rmnet_ss_function[] = {
        (struct usb_descriptor_header *) &rmnet_interface_desc,
        (struct usb_descriptor_header *) &rmnet_ss_notify_desc,
        (struct usb_descriptor_header *) &rmnet_ss_notify_comp_desc,
        (struct usb_descriptor_header *) &rmnet_ss_in_desc,
        (struct usb_descriptor_header *) &rmnet_ss_in_comp_desc,
        (struct usb_descriptor_header *) &rmnet_ss_out_desc,
        (struct usb_descriptor_header *) &rmnet_ss_out_comp_desc,
        NULL,
};
215
/* String descriptors */

static struct usb_string rmnet_string_defs[] = {
        [0].s = "RmNet",        /* interface name; id filled in at bind time */
        { } /* end of list */
};

static struct usb_gadget_strings rmnet_string_table = {
        .language =             0x0409, /* en-us */
        .strings =              rmnet_string_defs,
};

static struct usb_gadget_strings *rmnet_strings[] = {
        &rmnet_string_table,
        NULL,
};
232
Amit Blay2d4fb632012-05-29 18:05:38 +0300233static void frmnet_ctrl_response_available(struct f_rmnet *dev);
234
/* ------- misc functions --------------------*/

/* Map a usb_function back to its containing f_rmnet instance. */
static inline struct f_rmnet *func_to_rmnet(struct usb_function *f)
{
        return container_of(f, struct f_rmnet, port.func);
}
241
/* Map a grmnet port back to its containing f_rmnet instance. */
static inline struct f_rmnet *port_to_rmnet(struct grmnet *r)
{
        return container_of(r, struct f_rmnet, port);
}
246
/*
 * Allocate a usb_request on @ep with a kmalloc'd buffer of @len bytes.
 * Returns ERR_PTR(-ENOMEM) on failure; pair with frmnet_free_req().
 */
static struct usb_request *
frmnet_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags)
{
        struct usb_request *req;

        req = usb_ep_alloc_request(ep, flags);
        if (!req)
                return ERR_PTR(-ENOMEM);

        req->buf = kmalloc(len, flags);
        if (!req->buf) {
                usb_ep_free_request(ep, req);
                return ERR_PTR(-ENOMEM);
        }

        req->length = len;

        return req;
}
266
/* Release a request (and its buffer) allocated by frmnet_alloc_req(). */
void frmnet_free_req(struct usb_ep *ep, struct usb_request *req)
{
        kfree(req->buf);
        usb_ep_free_request(ep, req);
}
272
273static struct rmnet_ctrl_pkt *rmnet_alloc_ctrl_pkt(unsigned len, gfp_t flags)
274{
275 struct rmnet_ctrl_pkt *pkt;
276
277 pkt = kzalloc(sizeof(struct rmnet_ctrl_pkt), flags);
278 if (!pkt)
279 return ERR_PTR(-ENOMEM);
280
281 pkt->buf = kmalloc(len, flags);
282 if (!pkt->buf) {
283 kfree(pkt);
284 return ERR_PTR(-ENOMEM);
285 }
286 pkt->len = len;
287
288 return pkt;
289}
290
/* Free a control packet allocated by rmnet_alloc_ctrl_pkt(). */
static void rmnet_free_ctrl_pkt(struct rmnet_ctrl_pkt *pkt)
{
        kfree(pkt->buf);
        kfree(pkt);
}
296
297/* -------------------------------------------*/
298
Hemant Kumar1b820d52011-11-03 15:08:28 -0700299static int rmnet_gport_setup(void)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700300{
Jack Pham427f6922011-11-23 19:42:00 -0800301 int ret;
302 int port_idx;
303 int i;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700304
Vijayavardhan Vennapusaeb8d2392012-04-03 18:58:49 +0530305 pr_debug("%s: bam ports: %u bam2bam ports: %u data hsic ports: %u data hsuart ports: %u"
306 " smd ports: %u ctrl hsic ports: %u ctrl hsuart ports: %u"
Ofir Cohena1c2a872011-12-14 10:26:34 +0200307 " nr_rmnet_ports: %u\n",
308 __func__, no_data_bam_ports, no_data_bam2bam_ports,
Vijayavardhan Vennapusaeb8d2392012-04-03 18:58:49 +0530309 no_data_hsic_ports, no_data_hsuart_ports, no_ctrl_smd_ports,
310 no_ctrl_hsic_ports, no_ctrl_hsuart_ports, nr_rmnet_ports);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700311
Ofir Cohena1c2a872011-12-14 10:26:34 +0200312 if (no_data_bam_ports || no_data_bam2bam_ports) {
313 ret = gbam_setup(no_data_bam_ports,
314 no_data_bam2bam_ports);
Hemant Kumar1b820d52011-11-03 15:08:28 -0700315 if (ret)
316 return ret;
317 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700318
Hemant Kumar1b820d52011-11-03 15:08:28 -0700319 if (no_ctrl_smd_ports) {
320 ret = gsmd_ctrl_setup(no_ctrl_smd_ports);
321 if (ret)
322 return ret;
323 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700324
Jack Pham427f6922011-11-23 19:42:00 -0800325 if (no_data_hsic_ports) {
326 port_idx = ghsic_data_setup(no_data_hsic_ports,
327 USB_GADGET_RMNET);
328 if (port_idx < 0)
329 return port_idx;
330 for (i = 0; i < nr_rmnet_ports; i++) {
331 if (rmnet_ports[i].data_xport ==
332 USB_GADGET_XPORT_HSIC) {
333 rmnet_ports[i].data_xport_num = port_idx;
334 port_idx++;
335 }
336 }
337 }
338
339 if (no_ctrl_hsic_ports) {
340 port_idx = ghsic_ctrl_setup(no_ctrl_hsic_ports,
341 USB_GADGET_RMNET);
342 if (port_idx < 0)
343 return port_idx;
344 for (i = 0; i < nr_rmnet_ports; i++) {
345 if (rmnet_ports[i].ctrl_xport ==
346 USB_GADGET_XPORT_HSIC) {
347 rmnet_ports[i].ctrl_xport_num = port_idx;
348 port_idx++;
349 }
350 }
351 }
352
Vijayavardhan Vennapusaeb8d2392012-04-03 18:58:49 +0530353 if (no_data_hsuart_ports) {
354 port_idx = ghsuart_data_setup(no_data_hsuart_ports,
355 USB_GADGET_RMNET);
356 if (port_idx < 0)
357 return port_idx;
358 for (i = 0; i < nr_rmnet_ports; i++) {
359 if (rmnet_ports[i].data_xport ==
360 USB_GADGET_XPORT_HSUART) {
361 rmnet_ports[i].data_xport_num = port_idx;
362 port_idx++;
363 }
364 }
365 }
366
367 if (no_ctrl_hsuart_ports) {
368 port_idx = ghsuart_ctrl_setup(no_ctrl_hsuart_ports,
369 USB_GADGET_RMNET);
370 if (port_idx < 0)
371 return port_idx;
372 for (i = 0; i < nr_rmnet_ports; i++) {
373 if (rmnet_ports[i].ctrl_xport ==
374 USB_GADGET_XPORT_HSUART) {
375 rmnet_ports[i].ctrl_xport_num = port_idx;
376 port_idx++;
377 }
378 }
379 }
380
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700381 return 0;
382}
383
Manu Gautam2b0234a2011-09-07 16:47:52 +0530384static int gport_rmnet_connect(struct f_rmnet *dev)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700385{
Hemant Kumar1b820d52011-11-03 15:08:28 -0700386 int ret;
387 unsigned port_num;
388 enum transport_type cxport = rmnet_ports[dev->port_num].ctrl_xport;
389 enum transport_type dxport = rmnet_ports[dev->port_num].data_xport;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700390
Hemant Kumar1b820d52011-11-03 15:08:28 -0700391 pr_debug("%s: ctrl xport: %s data xport: %s dev: %p portno: %d\n",
392 __func__, xport_to_str(cxport), xport_to_str(dxport),
393 dev, dev->port_num);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700394
Hemant Kumar1b820d52011-11-03 15:08:28 -0700395 port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
396 switch (cxport) {
397 case USB_GADGET_XPORT_SMD:
398 ret = gsmd_ctrl_connect(&dev->port, port_num);
399 if (ret) {
400 pr_err("%s: gsmd_ctrl_connect failed: err:%d\n",
401 __func__, ret);
402 return ret;
403 }
404 break;
Jack Pham427f6922011-11-23 19:42:00 -0800405 case USB_GADGET_XPORT_HSIC:
406 ret = ghsic_ctrl_connect(&dev->port, port_num);
407 if (ret) {
408 pr_err("%s: ghsic_ctrl_connect failed: err:%d\n",
409 __func__, ret);
410 return ret;
411 }
412 break;
Vijayavardhan Vennapusaeb8d2392012-04-03 18:58:49 +0530413 case USB_GADGET_XPORT_HSUART:
414 ret = ghsuart_ctrl_connect(&dev->port, port_num);
415 if (ret) {
416 pr_err("%s: ghsuart_ctrl_connect failed: err:%d\n",
417 __func__, ret);
418 return ret;
419 }
420 break;
Hemant Kumar1b820d52011-11-03 15:08:28 -0700421 case USB_GADGET_XPORT_NONE:
422 break;
423 default:
424 pr_err("%s: Un-supported transport: %s\n", __func__,
425 xport_to_str(cxport));
426 return -ENODEV;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700427 }
428
Hemant Kumar1b820d52011-11-03 15:08:28 -0700429 port_num = rmnet_ports[dev->port_num].data_xport_num;
430 switch (dxport) {
431 case USB_GADGET_XPORT_BAM:
Ofir Cohena1c2a872011-12-14 10:26:34 +0200432 case USB_GADGET_XPORT_BAM2BAM:
Ofir Cohenfdecb602012-11-16 15:50:01 +0200433 case USB_GADGET_XPORT_BAM2BAM_IPA:
434 ret = gbam_connect(&dev->port, port_num,
Ofir Cohen77848d62012-12-05 13:16:10 +0200435 dxport, port_num);
Hemant Kumar1b820d52011-11-03 15:08:28 -0700436 if (ret) {
437 pr_err("%s: gbam_connect failed: err:%d\n",
438 __func__, ret);
439 gsmd_ctrl_disconnect(&dev->port, port_num);
440 return ret;
441 }
442 break;
Jack Pham427f6922011-11-23 19:42:00 -0800443 case USB_GADGET_XPORT_HSIC:
444 ret = ghsic_data_connect(&dev->port, port_num);
445 if (ret) {
446 pr_err("%s: ghsic_data_connect failed: err:%d\n",
447 __func__, ret);
448 ghsic_ctrl_disconnect(&dev->port, port_num);
449 return ret;
450 }
451 break;
Vijayavardhan Vennapusaeb8d2392012-04-03 18:58:49 +0530452 case USB_GADGET_XPORT_HSUART:
453 ret = ghsuart_data_connect(&dev->port, port_num);
454 if (ret) {
455 pr_err("%s: ghsuart_data_connect failed: err:%d\n",
456 __func__, ret);
457 ghsuart_ctrl_disconnect(&dev->port, port_num);
458 return ret;
459 }
460 break;
Hemant Kumar1b820d52011-11-03 15:08:28 -0700461 case USB_GADGET_XPORT_NONE:
462 break;
463 default:
464 pr_err("%s: Un-supported transport: %s\n", __func__,
465 xport_to_str(dxport));
466 return -ENODEV;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700467 }
468
469 return 0;
470}
471
/*
 * Disconnect both the control and the data transport for @dev; the
 * reverse of gport_rmnet_connect().  Returns 0, or -ENODEV for an
 * unrecognised transport type.
 */
static int gport_rmnet_disconnect(struct f_rmnet *dev)
{
        unsigned port_num;
        enum transport_type cxport = rmnet_ports[dev->port_num].ctrl_xport;
        enum transport_type dxport = rmnet_ports[dev->port_num].data_xport;

        pr_debug("%s: ctrl xport: %s data xport: %s dev: %p portno: %d\n",
                        __func__, xport_to_str(cxport), xport_to_str(dxport),
                        dev, dev->port_num);

        /* control side first */
        port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
        switch (cxport) {
        case USB_GADGET_XPORT_SMD:
                gsmd_ctrl_disconnect(&dev->port, port_num);
                break;
        case USB_GADGET_XPORT_HSIC:
                ghsic_ctrl_disconnect(&dev->port, port_num);
                break;
        case USB_GADGET_XPORT_HSUART:
                ghsuart_ctrl_disconnect(&dev->port, port_num);
                break;
        case USB_GADGET_XPORT_NONE:
                break;
        default:
                pr_err("%s: Un-supported transport: %s\n", __func__,
                                xport_to_str(cxport));
                return -ENODEV;
        }

        /* then the data side */
        port_num = rmnet_ports[dev->port_num].data_xport_num;
        switch (dxport) {
        case USB_GADGET_XPORT_BAM:
        case USB_GADGET_XPORT_BAM2BAM:
        case USB_GADGET_XPORT_BAM2BAM_IPA:
                gbam_disconnect(&dev->port, port_num, dxport);
                break;
        case USB_GADGET_XPORT_HSIC:
                ghsic_data_disconnect(&dev->port, port_num);
                break;
        case USB_GADGET_XPORT_HSUART:
                ghsuart_data_disconnect(&dev->port, port_num);
                break;
        case USB_GADGET_XPORT_NONE:
                break;
        default:
                pr_err("%s: Un-supported transport: %s\n", __func__,
                                xport_to_str(dxport));
                return -ENODEV;
        }

        return 0;
}
524
/*
 * Function unbind: free the speed-specific descriptor copies, the
 * notify request, and the function name allocated at bind time.
 */
static void frmnet_unbind(struct usb_configuration *c, struct usb_function *f)
{
        struct f_rmnet *dev = func_to_rmnet(f);

        pr_debug("%s: portno:%d\n", __func__, dev->port_num);

        if (gadget_is_superspeed(c->cdev->gadget))
                usb_free_descriptors(f->ss_descriptors);
        if (gadget_is_dualspeed(c->cdev->gadget))
                usb_free_descriptors(f->hs_descriptors);
        usb_free_descriptors(f->descriptors);

        frmnet_free_req(dev->notify, dev->notify_req);

        kfree(f->name);
}
541
/*
 * Bus suspend: only the BAM2BAM-family data transports need explicit
 * suspend handling; every other transport is a no-op here.
 */
static void frmnet_suspend(struct usb_function *f)
{
        struct f_rmnet *dev = func_to_rmnet(f);
        unsigned port_num;
        enum transport_type dxport = rmnet_ports[dev->port_num].data_xport;

        pr_debug("%s: data xport: %s dev: %p portno: %d\n",
                        __func__, xport_to_str(dxport),
                        dev, dev->port_num);

        port_num = rmnet_ports[dev->port_num].data_xport_num;
        switch (dxport) {
        case USB_GADGET_XPORT_BAM:
                break;
        case USB_GADGET_XPORT_BAM2BAM:
        case USB_GADGET_XPORT_BAM2BAM_IPA:
                gbam_suspend(&dev->port, port_num, dxport);
                break;
        case USB_GADGET_XPORT_HSIC:
                break;
        case USB_GADGET_XPORT_HSUART:
                break;
        case USB_GADGET_XPORT_NONE:
                break;
        default:
                pr_err("%s: Un-supported transport: %s\n", __func__,
                                xport_to_str(dxport));
        }
}
571
/*
 * Bus resume: mirror of frmnet_suspend() — only the BAM2BAM-family
 * data transports need explicit resume handling.
 */
static void frmnet_resume(struct usb_function *f)
{
        struct f_rmnet *dev = func_to_rmnet(f);
        unsigned port_num;
        enum transport_type dxport = rmnet_ports[dev->port_num].data_xport;

        pr_debug("%s: data xport: %s dev: %p portno: %d\n",
                        __func__, xport_to_str(dxport),
                        dev, dev->port_num);

        port_num = rmnet_ports[dev->port_num].data_xport_num;
        switch (dxport) {
        case USB_GADGET_XPORT_BAM:
                break;
        case USB_GADGET_XPORT_BAM2BAM:
        case USB_GADGET_XPORT_BAM2BAM_IPA:
                gbam_resume(&dev->port, port_num, dxport);
                break;
        case USB_GADGET_XPORT_HSIC:
                break;
        case USB_GADGET_XPORT_HSUART:
                break;
        case USB_GADGET_XPORT_NONE:
                break;
        default:
                pr_err("%s: Un-supported transport: %s\n", __func__,
                                xport_to_str(dxport));
        }
}
601
/*
 * Host deconfigured the interface (or cable removed): disable the
 * notify endpoint, flush queued control responses, and tear down the
 * backing transports.
 */
static void frmnet_disable(struct usb_function *f)
{
        struct f_rmnet *dev = func_to_rmnet(f);
        unsigned long flags;
        struct rmnet_ctrl_pkt *cpkt;

        pr_debug("%s: port#%d\n", __func__, dev->port_num);

        usb_ep_disable(dev->notify);
        dev->notify->driver_data = NULL;        /* marks ep as disabled */

        atomic_set(&dev->online, 0);

        /* drop any control responses the host never collected */
        spin_lock_irqsave(&dev->lock, flags);
        while (!list_empty(&dev->cpkt_resp_q)) {
                cpkt = list_first_entry(&dev->cpkt_resp_q,
                                struct rmnet_ctrl_pkt, list);

                list_del(&cpkt->list);
                rmnet_free_ctrl_pkt(cpkt);
        }
        atomic_set(&dev->notify_count, 0);
        spin_unlock_irqrestore(&dev->lock, flags);

        gport_rmnet_disconnect(dev);
}
628
/*
 * SET_INTERFACE handler: (re)enable the notify endpoint, configure the
 * data endpoints for the current speed on first use, and bring up the
 * backing transports.  Returns 0 or a negative errno.
 */
static int
frmnet_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
        struct f_rmnet *dev = func_to_rmnet(f);
        struct usb_composite_dev *cdev = dev->cdev;
        int ret;
        struct list_head *cpkt;

        pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num);

        /* driver_data doubles as the "endpoint currently enabled" flag */
        if (dev->notify->driver_data) {
                pr_debug("%s: reset port:%d\n", __func__, dev->port_num);
                usb_ep_disable(dev->notify);
        }

        ret = config_ep_by_speed(cdev->gadget, f, dev->notify);
        if (ret) {
                dev->notify->desc = NULL;
                ERROR(cdev, "config_ep_by_speed failes for ep %s, result %d\n",
                                dev->notify->name, ret);
                return ret;
        }
        ret = usb_ep_enable(dev->notify);

        if (ret) {
                pr_err("%s: usb ep#%s enable failed, err#%d\n",
                                __func__, dev->notify->name, ret);
                return ret;
        }
        dev->notify->driver_data = dev;

        /* data eps are only configured/connected on the first set_alt */
        if (!dev->port.in->desc || !dev->port.out->desc) {
                if (config_ep_by_speed(cdev->gadget, f, dev->port.in) ||
                        config_ep_by_speed(cdev->gadget, f, dev->port.out)) {
                        dev->port.in->desc = NULL;
                        dev->port.out->desc = NULL;
                        return -EINVAL;
                }
                ret = gport_rmnet_connect(dev);
        }

        atomic_set(&dev->online, 1);

        /* In case notifications were aborted, but there are pending control
           packets in the response queue, re-add the notifications */
        /* NOTE(review): cpkt_resp_q is walked here without dev->lock —
         * looks racy against the queue flush paths; confirm. */
        list_for_each(cpkt, &dev->cpkt_resp_q)
                frmnet_ctrl_response_available(dev);

        return ret;
}
679
680static void frmnet_ctrl_response_available(struct f_rmnet *dev)
681{
682 struct usb_request *req = dev->notify_req;
683 struct usb_cdc_notification *event;
684 unsigned long flags;
685 int ret;
Anna Perelf3af59d2012-08-12 15:28:30 +0300686 struct rmnet_ctrl_pkt *cpkt;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700687
688 pr_debug("%s:dev:%p portno#%d\n", __func__, dev, dev->port_num);
689
690 spin_lock_irqsave(&dev->lock, flags);
691 if (!atomic_read(&dev->online) || !req || !req->buf) {
692 spin_unlock_irqrestore(&dev->lock, flags);
693 return;
694 }
695
696 if (atomic_inc_return(&dev->notify_count) != 1) {
697 spin_unlock_irqrestore(&dev->lock, flags);
698 return;
699 }
700
701 event = req->buf;
702 event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
703 | USB_RECIP_INTERFACE;
704 event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
705 event->wValue = cpu_to_le16(0);
706 event->wIndex = cpu_to_le16(dev->ifc_id);
707 event->wLength = cpu_to_le16(0);
708 spin_unlock_irqrestore(&dev->lock, flags);
709
710 ret = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
711 if (ret) {
712 atomic_dec(&dev->notify_count);
Anna Perelf3af59d2012-08-12 15:28:30 +0300713 spin_lock_irqsave(&dev->lock, flags);
714 cpkt = list_first_entry(&dev->cpkt_resp_q,
715 struct rmnet_ctrl_pkt, list);
716 if (cpkt) {
717 list_del(&cpkt->list);
718 rmnet_free_ctrl_pkt(cpkt);
719 }
720 spin_unlock_irqrestore(&dev->lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700721 pr_debug("ep enqueue error %d\n", ret);
722 }
723}
724
/* Control transport callback: mark the control path as connected. */
static void frmnet_connect(struct grmnet *gr)
{
        struct f_rmnet                  *dev;

        if (!gr) {
                pr_err("%s: Invalid grmnet:%p\n", __func__, gr);
                return;
        }

        dev = port_to_rmnet(gr);

        atomic_set(&dev->ctrl_online, 1);
}
738
/*
 * Control transport callback: the control path went down.  If the USB
 * side is still up, notify the host (NETWORK_CONNECTION) and drop any
 * control responses the host will never read.
 */
static void frmnet_disconnect(struct grmnet *gr)
{
        struct f_rmnet                  *dev;
        unsigned long                   flags;
        struct usb_cdc_notification     *event;
        int                             status;
        struct rmnet_ctrl_pkt           *cpkt;

        if (!gr) {
                pr_err("%s: Invalid grmnet:%p\n", __func__, gr);
                return;
        }

        dev = port_to_rmnet(gr);

        atomic_set(&dev->ctrl_online, 0);

        if (!atomic_read(&dev->online)) {
                pr_debug("%s: nothing to do\n", __func__);
                return;
        }

        usb_ep_fifo_flush(dev->notify);

        /* reuse the shared notify request to report the link drop */
        event = dev->notify_req->buf;
        event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
                        | USB_RECIP_INTERFACE;
        event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
        event->wValue = cpu_to_le16(0);
        event->wIndex = cpu_to_le16(dev->ifc_id);
        event->wLength = cpu_to_le16(0);

        status = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
        if (status < 0) {
                if (!atomic_read(&dev->online))
                        return;
                pr_err("%s: rmnet notify ep enqueue error %d\n",
                                __func__, status);
        }

        /* flush queued-but-unread control responses */
        spin_lock_irqsave(&dev->lock, flags);
        while (!list_empty(&dev->cpkt_resp_q)) {
                cpkt = list_first_entry(&dev->cpkt_resp_q,
                                struct rmnet_ctrl_pkt, list);

                list_del(&cpkt->list);
                rmnet_free_ctrl_pkt(cpkt);
        }
        atomic_set(&dev->notify_count, 0);
        spin_unlock_irqrestore(&dev->lock, flags);

}
791
/*
 * Transport-layer callback: queue a control response (@buf, @len) for
 * the host and raise a RESPONSE_AVAILABLE notification.  The buffer is
 * copied; the caller keeps ownership of @buf.  Returns 0 on success
 * (including silent drop when the port is offline) or a negative errno.
 */
static int
frmnet_send_cpkt_response(void *gr, void *buf, size_t len)
{
        struct f_rmnet          *dev;
        struct rmnet_ctrl_pkt   *cpkt;
        unsigned long           flags;

        if (!gr || !buf) {
                pr_err("%s: Invalid grmnet/buf, grmnet:%p buf:%p\n",
                                __func__, gr, buf);
                return -ENODEV;
        }
        cpkt = rmnet_alloc_ctrl_pkt(len, GFP_ATOMIC);
        if (IS_ERR(cpkt)) {
                pr_err("%s: Unable to allocate ctrl pkt\n", __func__);
                return -ENOMEM;
        }
        memcpy(cpkt->buf, buf, len);
        cpkt->len = len;

        dev = port_to_rmnet(gr);

        pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);

        /* drop silently if either the cable or the ctrl path is down */
        if (!atomic_read(&dev->online) || !atomic_read(&dev->ctrl_online)) {
                rmnet_free_ctrl_pkt(cpkt);
                return 0;
        }

        spin_lock_irqsave(&dev->lock, flags);
        list_add_tail(&cpkt->list, &dev->cpkt_resp_q);
        spin_unlock_irqrestore(&dev->lock, flags);

        frmnet_ctrl_response_available(dev);

        return 0;
}
829
/*
 * Completion for the EP0 OUT data stage of SEND_ENCAPSULATED_COMMAND:
 * forward the received command bytes to the control transport.
 */
static void
frmnet_cmd_complete(struct usb_ep *ep, struct usb_request *req)
{
        struct f_rmnet                  *dev = req->context;
        struct usb_composite_dev        *cdev;
        unsigned                        port_num;

        if (!dev) {
                pr_err("%s: rmnet dev is null\n", __func__);
                return;
        }

        pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);

        cdev = dev->cdev;       /* NOTE(review): fetched but not used below */

        if (dev->port.send_encap_cmd) {
                port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
                dev->port.send_encap_cmd(port_num, req->buf, req->actual);
        }
}
851
852static void frmnet_notify_complete(struct usb_ep *ep, struct usb_request *req)
853{
854 struct f_rmnet *dev = req->context;
855 int status = req->status;
Anna Perelf3af59d2012-08-12 15:28:30 +0300856 unsigned long flags;
857 struct rmnet_ctrl_pkt *cpkt;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700858
859 pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);
860
861 switch (status) {
862 case -ECONNRESET:
863 case -ESHUTDOWN:
864 /* connection gone */
865 atomic_set(&dev->notify_count, 0);
866 break;
867 default:
868 pr_err("rmnet notify ep error %d\n", status);
869 /* FALLTHROUGH */
870 case 0:
Vamsi Krishna9e9921a2011-10-04 16:09:31 -0700871 if (!atomic_read(&dev->ctrl_online))
872 break;
873
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700874 if (atomic_dec_and_test(&dev->notify_count))
875 break;
876
877 status = usb_ep_queue(dev->notify, req, GFP_ATOMIC);
878 if (status) {
879 atomic_dec(&dev->notify_count);
Anna Perelf3af59d2012-08-12 15:28:30 +0300880 spin_lock_irqsave(&dev->lock, flags);
881 cpkt = list_first_entry(&dev->cpkt_resp_q,
882 struct rmnet_ctrl_pkt, list);
883 if (cpkt) {
884 list_del(&cpkt->list);
885 rmnet_free_ctrl_pkt(cpkt);
886 }
887 spin_unlock_irqrestore(&dev->lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700888 pr_debug("ep enqueue error %d\n", status);
889 }
890 break;
891 }
892}
893
/*
 * ep0 class-specific request handler for the rmnet interface.
 *
 * Handles the three CDC requests the host uses on this function:
 *  - SEND_ENCAPSULATED_COMMAND: accept w_length bytes in the data
 *    stage; frmnet_cmd_complete() forwards them to the modem.
 *  - GET_ENCAPSULATED_RESPONSE: return the oldest queued control
 *    response packet (possibly truncated to w_length).
 *  - SET_CONTROL_LINE_STATE: pass DTR/RTS bits to the modem.
 *
 * Returns the data-stage length (>= 0) after queuing the ep0 request,
 * -EOPNOTSUPP for unrecognized requests, -ENOTCONN when the cable is
 * not connected, or a negative usb_ep_queue() error.
 */
static int
frmnet_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req = cdev->req;
	unsigned port_num;
	u16 w_index = le16_to_cpu(ctrl->wIndex);
	u16 w_value = le16_to_cpu(ctrl->wValue);
	u16 w_length = le16_to_cpu(ctrl->wLength);
	int ret = -EOPNOTSUPP;

	pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num);

	if (!atomic_read(&dev->online)) {
		pr_debug("%s: usb cable is not connected\n", __func__);
		return -ENOTCONN;
	}

	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
		/* host -> device: payload arrives in the OUT data stage;
		 * completion handler forwards it to the control transport.
		 */
		ret = w_length;
		req->complete = frmnet_cmd_complete;
		req->context = dev;
		break;


	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
		if (w_value)
			goto invalid;
		else {
			unsigned len;
			struct rmnet_ctrl_pkt *cpkt;

			/* ep0 setup runs in atomic context; plain spin_lock
			 * matches the irq-saving users of dev->lock only if
			 * this path never races an irq - NOTE(review):
			 * other paths use spin_lock_irqsave; confirm intent.
			 */
			spin_lock(&dev->lock);
			if (list_empty(&dev->cpkt_resp_q)) {
				pr_err("ctrl resp queue empty "
					" req%02x.%02x v%04x i%04x l%d\n",
					ctrl->bRequestType, ctrl->bRequest,
					w_value, w_index, w_length);
				spin_unlock(&dev->lock);
				goto invalid;
			}

			/* dequeue the oldest response under the lock, then
			 * copy/free it outside the critical section */
			cpkt = list_first_entry(&dev->cpkt_resp_q,
					struct rmnet_ctrl_pkt, list);
			list_del(&cpkt->list);
			spin_unlock(&dev->lock);

			/* truncate to what the host asked for */
			len = min_t(unsigned, w_length, cpkt->len);
			memcpy(req->buf, cpkt->buf, len);
			ret = len;

			rmnet_free_ctrl_pkt(cpkt);
		}
		break;
	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
		/* w_value carries the DTR/RTS bitmap (ACM_CTRL_DTR etc.) */
		if (dev->port.notify_modem) {
			port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
			dev->port.notify_modem(&dev->port, port_num, w_value);
		}
		ret = 0;

		break;
	default:

invalid:
		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	}

	/* respond with data transfer or status phase? */
	if (ret >= 0) {
		VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
		req->zero = (ret < w_length);
		req->length = ret;
		ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		if (ret < 0)
			ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret);
	}

	return ret;
}
984
/*
 * usb_function .bind callback: claim an interface number, autoconfigure
 * the bulk IN/OUT and interrupt notify endpoints, allocate the notify
 * request, and build the FS/HS/SS descriptor arrays.
 *
 * On any failure, resources acquired so far are released via the
 * cascading labels at the bottom; the label order mirrors the reverse
 * of the acquisition order and must not be rearranged.
 */
static int frmnet_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	struct usb_ep *ep;
	struct usb_composite_dev *cdev = c->cdev;
	int ret = -ENODEV;

	dev->ifc_id = usb_interface_id(c, f);
	if (dev->ifc_id < 0) {
		pr_err("%s: unable to allocate ifc id, err:%d",
				__func__, dev->ifc_id);
		return dev->ifc_id;
	}
	rmnet_interface_desc.bInterfaceNumber = dev->ifc_id;

	/* bulk IN (device -> host data) */
	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_in_desc);
	if (!ep) {
		pr_err("%s: usb epin autoconfig failed\n", __func__);
		return -ENODEV;
	}
	dev->port.in = ep;
	ep->driver_data = cdev;	/* mark endpoint as claimed */

	/* bulk OUT (host -> device data) */
	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_out_desc);
	if (!ep) {
		pr_err("%s: usb epout autoconfig failed\n", __func__);
		ret = -ENODEV;
		goto ep_auto_out_fail;
	}
	dev->port.out = ep;
	ep->driver_data = cdev;

	/* interrupt IN (RESPONSE_AVAILABLE notifications) */
	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_notify_desc);
	if (!ep) {
		pr_err("%s: usb epnotify autoconfig failed\n", __func__);
		ret = -ENODEV;
		goto ep_auto_notify_fail;
	}
	dev->notify = ep;
	ep->driver_data = cdev;

	dev->notify_req = frmnet_alloc_req(ep,
				sizeof(struct usb_cdc_notification),
				GFP_KERNEL);
	if (IS_ERR(dev->notify_req)) {
		pr_err("%s: unable to allocate memory for notify req\n",
				__func__);
		ret = -ENOMEM;
		goto ep_notify_alloc_fail;
	}

	dev->notify_req->complete = frmnet_notify_complete;
	dev->notify_req->context = dev;

	/* every descriptor-copy failure below reports -ENOMEM */
	ret = -ENOMEM;
	f->descriptors = usb_copy_descriptors(rmnet_fs_function);

	if (!f->descriptors)
		goto fail;

	if (gadget_is_dualspeed(cdev->gadget)) {
		/* HS descriptors reuse the endpoint addresses that
		 * autoconfig assigned to the FS descriptors */
		rmnet_hs_in_desc.bEndpointAddress =
				rmnet_fs_in_desc.bEndpointAddress;
		rmnet_hs_out_desc.bEndpointAddress =
				rmnet_fs_out_desc.bEndpointAddress;
		rmnet_hs_notify_desc.bEndpointAddress =
				rmnet_fs_notify_desc.bEndpointAddress;

		/* copy descriptors, and track endpoint copies */
		f->hs_descriptors = usb_copy_descriptors(rmnet_hs_function);

		if (!f->hs_descriptors)
			goto fail;
	}

	if (gadget_is_superspeed(cdev->gadget)) {
		rmnet_ss_in_desc.bEndpointAddress =
				rmnet_fs_in_desc.bEndpointAddress;
		rmnet_ss_out_desc.bEndpointAddress =
				rmnet_fs_out_desc.bEndpointAddress;
		rmnet_ss_notify_desc.bEndpointAddress =
				rmnet_fs_notify_desc.bEndpointAddress;

		/* copy descriptors, and track endpoint copies */
		f->ss_descriptors = usb_copy_descriptors(rmnet_ss_function);

		if (!f->ss_descriptors)
			goto fail;
	}

	pr_info("%s: RmNet(%d) %s Speed, IN:%s OUT:%s\n",
			__func__, dev->port_num,
			gadget_is_dualspeed(cdev->gadget) ? "dual" : "full",
			dev->port.in->name, dev->port.out->name);

	return 0;

fail:
	/* descriptor arrays not yet copied are NULL, so guards are safe */
	if (f->ss_descriptors)
		usb_free_descriptors(f->ss_descriptors);
	if (f->hs_descriptors)
		usb_free_descriptors(f->hs_descriptors);
	if (f->descriptors)
		usb_free_descriptors(f->descriptors);
	if (dev->notify_req)
		frmnet_free_req(dev->notify, dev->notify_req);
ep_notify_alloc_fail:
	/* clearing driver_data releases the endpoint back to autoconfig */
	dev->notify->driver_data = NULL;
	dev->notify = NULL;
ep_auto_notify_fail:
	dev->port.out->driver_data = NULL;
	dev->port.out = NULL;
ep_auto_out_fail:
	dev->port.in->driver_data = NULL;
	dev->port.in = NULL;

	return ret;
}
1103
/*
 * Hook the rmnet port number `portno` (allocated earlier by
 * frmnet_init_port()) into configuration `c`: allocate the interface
 * string id once, fill in the usb_function callbacks, and register
 * the function with the composite framework.
 *
 * Returns 0 on success or a negative errno.
 */
static int frmnet_bind_config(struct usb_configuration *c, unsigned portno)
{
	int status;
	struct f_rmnet *dev;
	struct usb_function *f;
	unsigned long flags;

	pr_debug("%s: usb config:%p\n", __func__, c);

	if (portno >= nr_rmnet_ports) {
		pr_err("%s: supporting ports#%u port_id:%u", __func__,
				nr_rmnet_ports, portno);
		return -ENODEV;
	}

	/* allocate the interface string id only once, shared by all ports */
	if (rmnet_string_defs[0].id == 0) {
		status = usb_string_id(c->cdev);
		if (status < 0) {
			pr_err("%s: failed to get string id, err:%d\n",
					__func__, status);
			return status;
		}
		rmnet_string_defs[0].id = status;
	}

	dev = rmnet_ports[portno].port;

	/* GFP_ATOMIC because the allocation happens under dev->lock */
	spin_lock_irqsave(&dev->lock, flags);
	dev->cdev = c->cdev;
	f = &dev->port.func;
	f->name = kasprintf(GFP_ATOMIC, "rmnet%d", portno);
	spin_unlock_irqrestore(&dev->lock, flags);
	if (!f->name) {
		pr_err("%s: cannot allocate memory for name\n", __func__);
		return -ENOMEM;
	}

	f->strings = rmnet_strings;
	f->bind = frmnet_bind;
	f->unbind = frmnet_unbind;
	f->disable = frmnet_disable;
	f->set_alt = frmnet_set_alt;
	f->setup = frmnet_setup;
	f->suspend = frmnet_suspend;
	f->resume = frmnet_resume;
	dev->port.send_cpkt_response = frmnet_send_cpkt_response;
	dev->port.disconnect = frmnet_disconnect;
	dev->port.connect = frmnet_connect;

	status = usb_add_function(c, f);
	if (status) {
		pr_err("%s: usb add function failed: %d\n",
				__func__, status);
		/* release the name we allocated above */
		kfree(f->name);
		return status;
	}

	pr_debug("%s: complete\n", __func__);

	return status;
}
1165
Manu Gautame3e897c2011-09-12 17:18:46 +05301166static void frmnet_cleanup(void)
1167{
1168 int i;
1169
1170 for (i = 0; i < nr_rmnet_ports; i++)
1171 kfree(rmnet_ports[i].port);
1172
1173 nr_rmnet_ports = 0;
Hemant Kumar1b820d52011-11-03 15:08:28 -07001174 no_ctrl_smd_ports = 0;
1175 no_data_bam_ports = 0;
Ofir Cohena1c2a872011-12-14 10:26:34 +02001176 no_data_bam2bam_ports = 0;
Jack Pham427f6922011-11-23 19:42:00 -08001177 no_ctrl_hsic_ports = 0;
1178 no_data_hsic_ports = 0;
Vijayavardhan Vennapusaeb8d2392012-04-03 18:58:49 +05301179 no_ctrl_hsuart_ports = 0;
1180 no_data_hsuart_ports = 0;
Manu Gautame3e897c2011-09-12 17:18:46 +05301181}
1182
Hemant Kumar1b820d52011-11-03 15:08:28 -07001183static int frmnet_init_port(const char *ctrl_name, const char *data_name)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001184{
Hemant Kumar1b820d52011-11-03 15:08:28 -07001185 struct f_rmnet *dev;
1186 struct rmnet_ports *rmnet_port;
1187 int ret;
1188 int i;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001189
Hemant Kumar1b820d52011-11-03 15:08:28 -07001190 if (nr_rmnet_ports >= NR_RMNET_PORTS) {
1191 pr_err("%s: Max-%d instances supported\n",
1192 __func__, NR_RMNET_PORTS);
Manu Gautame3e897c2011-09-12 17:18:46 +05301193 return -EINVAL;
1194 }
1195
Hemant Kumar1b820d52011-11-03 15:08:28 -07001196 pr_debug("%s: port#:%d, ctrl port: %s data port: %s\n",
1197 __func__, nr_rmnet_ports, ctrl_name, data_name);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001198
Hemant Kumar1b820d52011-11-03 15:08:28 -07001199 dev = kzalloc(sizeof(struct f_rmnet), GFP_KERNEL);
1200 if (!dev) {
1201 pr_err("%s: Unable to allocate rmnet device\n", __func__);
1202 return -ENOMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001203 }
1204
Hemant Kumar1b820d52011-11-03 15:08:28 -07001205 dev->port_num = nr_rmnet_ports;
1206 spin_lock_init(&dev->lock);
1207 INIT_LIST_HEAD(&dev->cpkt_resp_q);
1208
1209 rmnet_port = &rmnet_ports[nr_rmnet_ports];
1210 rmnet_port->port = dev;
1211 rmnet_port->port_num = nr_rmnet_ports;
1212 rmnet_port->ctrl_xport = str_to_xport(ctrl_name);
1213 rmnet_port->data_xport = str_to_xport(data_name);
1214
1215 switch (rmnet_port->ctrl_xport) {
1216 case USB_GADGET_XPORT_SMD:
1217 rmnet_port->ctrl_xport_num = no_ctrl_smd_ports;
1218 no_ctrl_smd_ports++;
1219 break;
Jack Pham427f6922011-11-23 19:42:00 -08001220 case USB_GADGET_XPORT_HSIC:
1221 rmnet_port->ctrl_xport_num = no_ctrl_hsic_ports;
1222 no_ctrl_hsic_ports++;
1223 break;
Vijayavardhan Vennapusaeb8d2392012-04-03 18:58:49 +05301224 case USB_GADGET_XPORT_HSUART:
1225 rmnet_port->ctrl_xport_num = no_ctrl_hsuart_ports;
1226 no_ctrl_hsuart_ports++;
1227 break;
Hemant Kumar1b820d52011-11-03 15:08:28 -07001228 case USB_GADGET_XPORT_NONE:
1229 break;
1230 default:
1231 pr_err("%s: Un-supported transport: %u\n", __func__,
1232 rmnet_port->ctrl_xport);
1233 ret = -ENODEV;
1234 goto fail_probe;
1235 }
1236
1237 switch (rmnet_port->data_xport) {
1238 case USB_GADGET_XPORT_BAM:
1239 rmnet_port->data_xport_num = no_data_bam_ports;
1240 no_data_bam_ports++;
1241 break;
Ofir Cohena1c2a872011-12-14 10:26:34 +02001242 case USB_GADGET_XPORT_BAM2BAM:
Ofir Cohenfdecb602012-11-16 15:50:01 +02001243 case USB_GADGET_XPORT_BAM2BAM_IPA:
Ofir Cohena1c2a872011-12-14 10:26:34 +02001244 rmnet_port->data_xport_num = no_data_bam2bam_ports;
1245 no_data_bam2bam_ports++;
1246 break;
Jack Pham427f6922011-11-23 19:42:00 -08001247 case USB_GADGET_XPORT_HSIC:
1248 rmnet_port->data_xport_num = no_data_hsic_ports;
1249 no_data_hsic_ports++;
1250 break;
Vijayavardhan Vennapusaeb8d2392012-04-03 18:58:49 +05301251 case USB_GADGET_XPORT_HSUART:
1252 rmnet_port->data_xport_num = no_data_hsuart_ports;
1253 no_data_hsuart_ports++;
1254 break;
Hemant Kumar1b820d52011-11-03 15:08:28 -07001255 case USB_GADGET_XPORT_NONE:
1256 break;
1257 default:
1258 pr_err("%s: Un-supported transport: %u\n", __func__,
1259 rmnet_port->data_xport);
1260 ret = -ENODEV;
1261 goto fail_probe;
1262 }
1263 nr_rmnet_ports++;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001264
1265 return 0;
1266
1267fail_probe:
Manu Gautam2b0234a2011-09-07 16:47:52 +05301268 for (i = 0; i < nr_rmnet_ports; i++)
1269 kfree(rmnet_ports[i].port);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001270
Hemant Kumar1b820d52011-11-03 15:08:28 -07001271 nr_rmnet_ports = 0;
1272 no_ctrl_smd_ports = 0;
1273 no_data_bam_ports = 0;
Jack Pham427f6922011-11-23 19:42:00 -08001274 no_ctrl_hsic_ports = 0;
1275 no_data_hsic_ports = 0;
Vijayavardhan Vennapusaeb8d2392012-04-03 18:58:49 +05301276 no_ctrl_hsuart_ports = 0;
1277 no_data_hsuart_ports = 0;
Hemant Kumar1b820d52011-11-03 15:08:28 -07001278
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001279 return ret;
1280}