/*
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/spinlock.h>

#include <mach/usb_gadget_xport.h>
#include <mach/usb_bam.h>

#include "u_rmnet.h"
#include "gadget_chips.h"

#define RMNET_NOTIFY_INTERVAL	5
#define RMNET_MAX_NOTIFY_SIZE	sizeof(struct usb_cdc_notification)


#define ACM_CTRL_DTR	(1 << 0)

/* TODO: use separate structures for data and
 * control paths
 */
struct f_rmnet {
	struct grmnet			port;
	int				ifc_id;
	u8				port_num;
	atomic_t			online;
	atomic_t			ctrl_online;
	struct usb_composite_dev	*cdev;

	spinlock_t			lock;

	/* usb eps*/
	struct usb_ep			*notify;
	struct usb_request		*notify_req;

	/* control info */
	struct list_head		cpkt_resp_q;
	atomic_t			notify_count;
	unsigned long			cpkts_len;
};

#define NR_RMNET_PORTS	3
static unsigned int nr_rmnet_ports;
static unsigned int no_ctrl_smd_ports;
static unsigned int no_ctrl_qti_ports;
static unsigned int no_ctrl_hsic_ports;
static unsigned int no_ctrl_hsuart_ports;
static unsigned int no_data_bam_ports;
static unsigned int no_data_bam2bam_ports;
static unsigned int no_data_hsic_ports;
static unsigned int no_data_hsuart_ports;
static struct rmnet_ports {
	enum transport_type		data_xport;
	enum transport_type		ctrl_xport;
	unsigned			data_xport_num;
	unsigned			ctrl_xport_num;
	unsigned			port_num;
	struct f_rmnet			*port;
} rmnet_ports[NR_RMNET_PORTS];

static struct usb_interface_descriptor rmnet_interface_desc = {
	.bLength =		USB_DT_INTERFACE_SIZE,
	.bDescriptorType =	USB_DT_INTERFACE,
	.bNumEndpoints =	3,
	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
	.bInterfaceSubClass =	USB_CLASS_VENDOR_SPEC,
	.bInterfaceProtocol =	USB_CLASS_VENDOR_SPEC,
	/* .iInterface = DYNAMIC */
};

/* Full speed support */
static struct usb_endpoint_descriptor rmnet_fs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
	.bInterval =		1 << RMNET_NOTIFY_INTERVAL,
};

static struct usb_endpoint_descriptor rmnet_fs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(64),
};

static struct usb_endpoint_descriptor rmnet_fs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(64),
};

static struct usb_descriptor_header *rmnet_fs_function[] = {
	(struct usb_descriptor_header *) &rmnet_interface_desc,
	(struct usb_descriptor_header *) &rmnet_fs_notify_desc,
	(struct usb_descriptor_header *) &rmnet_fs_in_desc,
	(struct usb_descriptor_header *) &rmnet_fs_out_desc,
	NULL,
};

/* High speed support */
static struct usb_endpoint_descriptor rmnet_hs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
	.bInterval =		RMNET_NOTIFY_INTERVAL + 4,
};

static struct usb_endpoint_descriptor rmnet_hs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor rmnet_hs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(512),
};

static struct usb_descriptor_header *rmnet_hs_function[] = {
	(struct usb_descriptor_header *) &rmnet_interface_desc,
	(struct usb_descriptor_header *) &rmnet_hs_notify_desc,
	(struct usb_descriptor_header *) &rmnet_hs_in_desc,
	(struct usb_descriptor_header *) &rmnet_hs_out_desc,
	NULL,
};

/* Super speed support */
static struct usb_endpoint_descriptor rmnet_ss_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
	.bInterval =		RMNET_NOTIFY_INTERVAL + 4,
};

static struct usb_ss_ep_comp_descriptor rmnet_ss_notify_comp_desc = {
	.bLength =		sizeof rmnet_ss_notify_comp_desc,
	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,

	/* the following 3 values can be tweaked if necessary */
	/* .bMaxBurst =		0, */
	/* .bmAttributes =	0, */
	.wBytesPerInterval =	cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
};

static struct usb_endpoint_descriptor rmnet_ss_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(1024),
};

static struct usb_ss_ep_comp_descriptor rmnet_ss_in_comp_desc = {
	.bLength =		sizeof rmnet_ss_in_comp_desc,
	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,

	/* the following 2 values can be tweaked if necessary */
	/* .bMaxBurst =		0, */
	/* .bmAttributes =	0, */
};

static struct usb_endpoint_descriptor rmnet_ss_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(1024),
};

static struct usb_ss_ep_comp_descriptor rmnet_ss_out_comp_desc = {
	.bLength =		sizeof rmnet_ss_out_comp_desc,
	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,

	/* the following 2 values can be tweaked if necessary */
	/* .bMaxBurst =		0, */
	/* .bmAttributes =	0, */
};

static struct usb_descriptor_header *rmnet_ss_function[] = {
	(struct usb_descriptor_header *) &rmnet_interface_desc,
	(struct usb_descriptor_header *) &rmnet_ss_notify_desc,
	(struct usb_descriptor_header *) &rmnet_ss_notify_comp_desc,
	(struct usb_descriptor_header *) &rmnet_ss_in_desc,
	(struct usb_descriptor_header *) &rmnet_ss_in_comp_desc,
	(struct usb_descriptor_header *) &rmnet_ss_out_desc,
	(struct usb_descriptor_header *) &rmnet_ss_out_comp_desc,
	NULL,
};

/* String descriptors */

static struct usb_string rmnet_string_defs[] = {
	[0].s = "RmNet",
	{  } /* end of list */
};

static struct usb_gadget_strings rmnet_string_table = {
	.language =	0x0409,	/* en-us */
	.strings =	rmnet_string_defs,
};

static struct usb_gadget_strings *rmnet_strings[] = {
	&rmnet_string_table,
	NULL,
};

static void frmnet_ctrl_response_available(struct f_rmnet *dev);

/* ------- misc functions --------------------*/

static inline struct f_rmnet *func_to_rmnet(struct usb_function *f)
{
	return container_of(f, struct f_rmnet, port.func);
}

static inline struct f_rmnet *port_to_rmnet(struct grmnet *r)
{
	return container_of(r, struct f_rmnet, port);
}

static struct usb_request *
frmnet_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags)
{
	struct usb_request *req;

	req = usb_ep_alloc_request(ep, flags);
	if (!req)
		return ERR_PTR(-ENOMEM);

	req->buf = kmalloc(len, flags);
	if (!req->buf) {
		usb_ep_free_request(ep, req);
		return ERR_PTR(-ENOMEM);
	}

	req->length = len;

	return req;
}

void frmnet_free_req(struct usb_ep *ep, struct usb_request *req)
{
	kfree(req->buf);
	usb_ep_free_request(ep, req);
}

static struct rmnet_ctrl_pkt *rmnet_alloc_ctrl_pkt(unsigned len, gfp_t flags)
{
	struct rmnet_ctrl_pkt *pkt;

	pkt = kzalloc(sizeof(struct rmnet_ctrl_pkt), flags);
	if (!pkt)
		return ERR_PTR(-ENOMEM);

	pkt->buf = kmalloc(len, flags);
	if (!pkt->buf) {
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}
	pkt->len = len;

	return pkt;
}

static void rmnet_free_ctrl_pkt(struct rmnet_ctrl_pkt *pkt)
{
	kfree(pkt->buf);
	kfree(pkt);
}

/* -------------------------------------------*/

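/*
 * Allocate the control and data transport ports (BAM, BAM2BAM, SMD, HSIC,
 * HSUART) requested at init time and record the resulting port indices in
 * rmnet_ports[].
 */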
static int rmnet_gport_setup(void)
{
	int	ret;
	int	port_idx;
	int	i;
	u8	base;

	pr_debug("%s: bam ports: %u bam2bam ports: %u data hsic ports: %u data hsuart ports: %u"
		" smd ports: %u ctrl hsic ports: %u ctrl hsuart ports: %u"
		" nr_rmnet_ports: %u\n",
		__func__, no_data_bam_ports, no_data_bam2bam_ports,
		no_data_hsic_ports, no_data_hsuart_ports, no_ctrl_smd_ports,
		no_ctrl_hsic_ports, no_ctrl_hsuart_ports, nr_rmnet_ports);

	if (no_data_bam_ports || no_data_bam2bam_ports) {
		ret = gbam_setup(no_data_bam_ports,
			no_data_bam2bam_ports);
		if (ret)
			return ret;
	}

	if (no_ctrl_smd_ports) {
		ret = gsmd_ctrl_setup(FRMNET_CTRL_CLIENT,
				no_ctrl_smd_ports, &base);
		if (ret)
			return ret;
		for (i = 0; i < nr_rmnet_ports; i++)
			if (rmnet_ports[i].port)
				rmnet_ports[i].port->port_num += base;
	}

	if (no_data_hsic_ports) {
		port_idx = ghsic_data_setup(no_data_hsic_ports,
				USB_GADGET_RMNET);
		if (port_idx < 0)
			return port_idx;
		for (i = 0; i < nr_rmnet_ports; i++) {
			if (rmnet_ports[i].data_xport ==
					USB_GADGET_XPORT_HSIC) {
				rmnet_ports[i].data_xport_num = port_idx;
				port_idx++;
			}
		}
	}

	if (no_ctrl_hsic_ports) {
		port_idx = ghsic_ctrl_setup(no_ctrl_hsic_ports,
				USB_GADGET_RMNET);
		if (port_idx < 0)
			return port_idx;
		for (i = 0; i < nr_rmnet_ports; i++) {
			if (rmnet_ports[i].ctrl_xport ==
					USB_GADGET_XPORT_HSIC) {
				rmnet_ports[i].ctrl_xport_num = port_idx;
				port_idx++;
			}
		}
	}

	if (no_data_hsuart_ports) {
		port_idx = ghsuart_data_setup(no_data_hsuart_ports,
				USB_GADGET_RMNET);
		if (port_idx < 0)
			return port_idx;
		for (i = 0; i < nr_rmnet_ports; i++) {
			if (rmnet_ports[i].data_xport ==
					USB_GADGET_XPORT_HSUART) {
				rmnet_ports[i].data_xport_num = port_idx;
				port_idx++;
			}
		}
	}

	if (no_ctrl_hsuart_ports) {
		port_idx = ghsuart_ctrl_setup(no_ctrl_hsuart_ports,
				USB_GADGET_RMNET);
		if (port_idx < 0)
			return port_idx;
		for (i = 0; i < nr_rmnet_ports; i++) {
			if (rmnet_ports[i].ctrl_xport ==
					USB_GADGET_XPORT_HSUART) {
				rmnet_ports[i].ctrl_xport_num = port_idx;
				port_idx++;
			}
		}
	}

	return 0;
}

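/*
 * Bring up the control transport first and then the data transport for the
 * given rmnet instance; on a data-path failure the already-connected control
 * transport is torn down again.
 */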
static int gport_rmnet_connect(struct f_rmnet *dev)
{
	int			ret;
	unsigned		port_num;
	enum transport_type	cxport = rmnet_ports[dev->port_num].ctrl_xport;
	enum transport_type	dxport = rmnet_ports[dev->port_num].data_xport;
	int			src_connection_idx = 0, dst_connection_idx = 0;
	struct usb_gadget	*gadget = dev->cdev->gadget;

	pr_debug("%s: ctrl xport: %s data xport: %s dev: %p portno: %d\n",
			__func__, xport_to_str(cxport), xport_to_str(dxport),
			dev, dev->port_num);

	port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
	switch (cxport) {
	case USB_GADGET_XPORT_SMD:
		ret = gsmd_ctrl_connect(&dev->port, port_num);
		if (ret) {
			pr_err("%s: gsmd_ctrl_connect failed: err:%d\n",
					__func__, ret);
			return ret;
		}
		break;
	case USB_GADGET_XPORT_QTI:
		ret = gqti_ctrl_connect(&dev->port);
		if (ret) {
			pr_err("%s: gqti_ctrl_connect failed: err:%d\n",
					__func__, ret);
			return ret;
		}
		break;
	case USB_GADGET_XPORT_HSIC:
		ret = ghsic_ctrl_connect(&dev->port, port_num);
		if (ret) {
			pr_err("%s: ghsic_ctrl_connect failed: err:%d\n",
					__func__, ret);
			return ret;
		}
		break;
	case USB_GADGET_XPORT_HSUART:
		ret = ghsuart_ctrl_connect(&dev->port, port_num);
		if (ret) {
			pr_err("%s: ghsuart_ctrl_connect failed: err:%d\n",
					__func__, ret);
			return ret;
		}
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %s\n", __func__,
				xport_to_str(cxport));
		return -ENODEV;
	}

	port_num = rmnet_ports[dev->port_num].data_xport_num;

	switch (dxport) {
	case USB_GADGET_XPORT_BAM2BAM:
		src_connection_idx = usb_bam_get_connection_idx(gadget->name,
			A2_P_BAM, USB_TO_PEER_PERIPHERAL, port_num);
		dst_connection_idx = usb_bam_get_connection_idx(gadget->name,
			A2_P_BAM, PEER_PERIPHERAL_TO_USB, port_num);
		if (dst_connection_idx < 0 || src_connection_idx < 0) {
			pr_err("%s: usb_bam_get_connection_idx failed\n",
				__func__);
			gsmd_ctrl_disconnect(&dev->port, port_num);
			return -EINVAL;
		}
		/* fall through */
	case USB_GADGET_XPORT_BAM:
		ret = gbam_connect(&dev->port, port_num,
			dxport, src_connection_idx, dst_connection_idx);
		if (ret) {
			pr_err("%s: gbam_connect failed: err:%d\n",
					__func__, ret);
			gsmd_ctrl_disconnect(&dev->port, port_num);
			return ret;
		}
		break;
	case USB_GADGET_XPORT_BAM2BAM_IPA:
		src_connection_idx = usb_bam_get_connection_idx(gadget->name,
			IPA_P_BAM, USB_TO_PEER_PERIPHERAL, port_num);
		dst_connection_idx = usb_bam_get_connection_idx(gadget->name,
			IPA_P_BAM, PEER_PERIPHERAL_TO_USB, port_num);
		if (dst_connection_idx < 0 || src_connection_idx < 0) {
			pr_err("%s: usb_bam_get_connection_idx failed\n",
				__func__);
			gsmd_ctrl_disconnect(&dev->port, port_num);
			return -EINVAL;
		}
		ret = gbam_connect(&dev->port, port_num,
			dxport, src_connection_idx, dst_connection_idx);
		if (ret) {
			pr_err("%s: gbam_connect failed: err:%d\n",
					__func__, ret);
			if (cxport == USB_GADGET_XPORT_QTI)
				gqti_ctrl_disconnect(&dev->port);
			else
				gsmd_ctrl_disconnect(&dev->port, port_num);
			return ret;
		}
		break;
	case USB_GADGET_XPORT_HSIC:
		ret = ghsic_data_connect(&dev->port, port_num);
		if (ret) {
			pr_err("%s: ghsic_data_connect failed: err:%d\n",
					__func__, ret);
			ghsic_ctrl_disconnect(&dev->port, port_num);
			return ret;
		}
		break;
	case USB_GADGET_XPORT_HSUART:
		ret = ghsuart_data_connect(&dev->port, port_num);
		if (ret) {
			pr_err("%s: ghsuart_data_connect failed: err:%d\n",
					__func__, ret);
			ghsuart_ctrl_disconnect(&dev->port, port_num);
			return ret;
		}
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %s\n", __func__,
				xport_to_str(dxport));
		return -ENODEV;
	}

	return 0;
}

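/*
 * Tear down whichever control and data transports are currently bound to
 * this rmnet instance.
 */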
static int gport_rmnet_disconnect(struct f_rmnet *dev)
{
	unsigned		port_num;
	enum transport_type	cxport = rmnet_ports[dev->port_num].ctrl_xport;
	enum transport_type	dxport = rmnet_ports[dev->port_num].data_xport;

	pr_debug("%s: ctrl xport: %s data xport: %s dev: %p portno: %d\n",
			__func__, xport_to_str(cxport), xport_to_str(dxport),
			dev, dev->port_num);

	port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
	switch (cxport) {
	case USB_GADGET_XPORT_SMD:
		gsmd_ctrl_disconnect(&dev->port, port_num);
		break;
	case USB_GADGET_XPORT_QTI:
		gqti_ctrl_disconnect(&dev->port);
		break;
	case USB_GADGET_XPORT_HSIC:
		ghsic_ctrl_disconnect(&dev->port, port_num);
		break;
	case USB_GADGET_XPORT_HSUART:
		ghsuart_ctrl_disconnect(&dev->port, port_num);
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %s\n", __func__,
				xport_to_str(cxport));
		return -ENODEV;
	}

	port_num = rmnet_ports[dev->port_num].data_xport_num;
	switch (dxport) {
	case USB_GADGET_XPORT_BAM:
	case USB_GADGET_XPORT_BAM2BAM:
	case USB_GADGET_XPORT_BAM2BAM_IPA:
		gbam_disconnect(&dev->port, port_num, dxport);
		break;
	case USB_GADGET_XPORT_HSIC:
		ghsic_data_disconnect(&dev->port, port_num);
		break;
	case USB_GADGET_XPORT_HSUART:
		ghsuart_data_disconnect(&dev->port, port_num);
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %s\n", __func__,
				xport_to_str(dxport));
		return -ENODEV;
	}

	return 0;
}

static void frmnet_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);

	pr_debug("%s: portno:%d\n", __func__, dev->port_num);

	if (gadget_is_superspeed(c->cdev->gadget))
		usb_free_descriptors(f->ss_descriptors);
	if (gadget_is_dualspeed(c->cdev->gadget))
		usb_free_descriptors(f->hs_descriptors);
	usb_free_descriptors(f->descriptors);

	frmnet_free_req(dev->notify, dev->notify_req);

	kfree(f->name);
}

static void frmnet_purge_responses(struct f_rmnet *dev)
{
	unsigned long flags;
	struct rmnet_ctrl_pkt *cpkt;

	pr_debug("%s: port#%d\n", __func__, dev->port_num);

	spin_lock_irqsave(&dev->lock, flags);
	while (!list_empty(&dev->cpkt_resp_q)) {
		cpkt = list_first_entry(&dev->cpkt_resp_q,
				struct rmnet_ctrl_pkt, list);

		list_del(&cpkt->list);
		rmnet_free_ctrl_pkt(cpkt);
	}
	atomic_set(&dev->notify_count, 0);
	spin_unlock_irqrestore(&dev->lock, flags);
}

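/*
 * Bus suspend handler: purge any queued control responses and suspend the
 * BAM2BAM data path if one is in use.
 */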
static void frmnet_suspend(struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	unsigned port_num;
	enum transport_type dxport = rmnet_ports[dev->port_num].data_xport;

	pr_debug("%s: data xport: %s dev: %p portno: %d\n",
		__func__, xport_to_str(dxport),
		dev, dev->port_num);

	frmnet_purge_responses(dev);

	port_num = rmnet_ports[dev->port_num].data_xport_num;
	switch (dxport) {
	case USB_GADGET_XPORT_BAM:
		break;
	case USB_GADGET_XPORT_BAM2BAM:
	case USB_GADGET_XPORT_BAM2BAM_IPA:
		gbam_suspend(&dev->port, port_num, dxport);
		break;
	case USB_GADGET_XPORT_HSIC:
		break;
	case USB_GADGET_XPORT_HSUART:
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %s\n", __func__,
				xport_to_str(dxport));
	}
}

static void frmnet_resume(struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	unsigned port_num;
	enum transport_type dxport = rmnet_ports[dev->port_num].data_xport;

	pr_debug("%s: data xport: %s dev: %p portno: %d\n",
		__func__, xport_to_str(dxport),
		dev, dev->port_num);

	port_num = rmnet_ports[dev->port_num].data_xport_num;
	switch (dxport) {
	case USB_GADGET_XPORT_BAM:
		break;
	case USB_GADGET_XPORT_BAM2BAM:
	case USB_GADGET_XPORT_BAM2BAM_IPA:
		gbam_resume(&dev->port, port_num, dxport);
		break;
	case USB_GADGET_XPORT_HSIC:
		break;
	case USB_GADGET_XPORT_HSUART:
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %s\n", __func__,
				xport_to_str(dxport));
	}
}

static void frmnet_disable(struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);

	pr_debug("%s: port#%d\n", __func__, dev->port_num);

	usb_ep_disable(dev->notify);
	dev->notify->driver_data = NULL;

	atomic_set(&dev->online, 0);

	frmnet_purge_responses(dev);

	gport_rmnet_disconnect(dev);
}

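/*
 * SET_INTERFACE handler: (re)configure and enable the notify endpoint,
 * connect the data/control transports when the bulk endpoints are not yet
 * configured, and replay RESPONSE_AVAILABLE notifications for any control
 * packets that were queued while the interface was down.
 */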
static int
frmnet_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct f_rmnet			*dev = func_to_rmnet(f);
	struct usb_composite_dev	*cdev = dev->cdev;
	int				ret;
	struct list_head		*cpkt;

	pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num);

	if (dev->notify->driver_data) {
		pr_debug("%s: reset port:%d\n", __func__, dev->port_num);
		usb_ep_disable(dev->notify);
	}

	ret = config_ep_by_speed(cdev->gadget, f, dev->notify);
	if (ret) {
		dev->notify->desc = NULL;
		ERROR(cdev, "config_ep_by_speed failed for ep %s, result %d\n",
				dev->notify->name, ret);
		return ret;
	}
	ret = usb_ep_enable(dev->notify);

	if (ret) {
		pr_err("%s: usb ep#%s enable failed, err#%d\n",
				__func__, dev->notify->name, ret);
		return ret;
	}
	dev->notify->driver_data = dev;

	if (!dev->port.in->desc || !dev->port.out->desc) {
		if (config_ep_by_speed(cdev->gadget, f, dev->port.in) ||
			config_ep_by_speed(cdev->gadget, f, dev->port.out)) {
				dev->port.in->desc = NULL;
				dev->port.out->desc = NULL;
				return -EINVAL;
		}
		ret = gport_rmnet_connect(dev);
	}

	atomic_set(&dev->online, 1);

	/* In case notifications were aborted, but there are pending control
	   packets in the response queue, re-add the notifications */
	list_for_each(cpkt, &dev->cpkt_resp_q)
		frmnet_ctrl_response_available(dev);

	return ret;
}

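/*
 * Queue a CDC RESPONSE_AVAILABLE notification on the interrupt endpoint.
 * notify_count tracks outstanding notifications so only one request is ever
 * queued at a time; on a queue failure the oldest pending response is dropped.
 */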
static void frmnet_ctrl_response_available(struct f_rmnet *dev)
{
	struct usb_request		*req = dev->notify_req;
	struct usb_cdc_notification	*event;
	unsigned long			flags;
	int				ret;
	struct rmnet_ctrl_pkt		*cpkt;

	pr_debug("%s:dev:%p portno#%d\n", __func__, dev, dev->port_num);

	spin_lock_irqsave(&dev->lock, flags);
	if (!atomic_read(&dev->online) || !req || !req->buf) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}

	if (atomic_inc_return(&dev->notify_count) != 1) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}

	event = req->buf;
	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
			| USB_RECIP_INTERFACE;
	event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
	event->wValue = cpu_to_le16(0);
	event->wIndex = cpu_to_le16(dev->ifc_id);
	event->wLength = cpu_to_le16(0);
	spin_unlock_irqrestore(&dev->lock, flags);

	ret = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
	if (ret) {
		spin_lock_irqsave(&dev->lock, flags);
		if (!list_empty(&dev->cpkt_resp_q)) {
			atomic_dec(&dev->notify_count);
			cpkt = list_first_entry(&dev->cpkt_resp_q,
					struct rmnet_ctrl_pkt, list);
			list_del(&cpkt->list);
			rmnet_free_ctrl_pkt(cpkt);
		}
		spin_unlock_irqrestore(&dev->lock, flags);
		pr_debug("ep enqueue error %d\n", ret);
	}
}

static void frmnet_connect(struct grmnet *gr)
{
	struct f_rmnet			*dev;

	if (!gr) {
		pr_err("%s: Invalid grmnet:%p\n", __func__, gr);
		return;
	}

	dev = port_to_rmnet(gr);

	atomic_set(&dev->ctrl_online, 1);
}

static void frmnet_disconnect(struct grmnet *gr)
{
	struct f_rmnet			*dev;
	struct usb_cdc_notification	*event;
	int				status;

	if (!gr) {
		pr_err("%s: Invalid grmnet:%p\n", __func__, gr);
		return;
	}

	dev = port_to_rmnet(gr);

	atomic_set(&dev->ctrl_online, 0);

	if (!atomic_read(&dev->online)) {
		pr_debug("%s: nothing to do\n", __func__);
		return;
	}

	usb_ep_fifo_flush(dev->notify);

	event = dev->notify_req->buf;
	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
			| USB_RECIP_INTERFACE;
	event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
	event->wValue = cpu_to_le16(0);
	event->wIndex = cpu_to_le16(dev->ifc_id);
	event->wLength = cpu_to_le16(0);

	status = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
	if (status < 0) {
		if (!atomic_read(&dev->online))
			return;
		pr_err("%s: rmnet notify ep enqueue error %d\n",
				__func__, status);
	}

	frmnet_purge_responses(dev);
}

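/*
 * Called by the control transport to queue an encapsulated response for the
 * host: the packet is copied onto cpkt_resp_q and a RESPONSE_AVAILABLE
 * notification is raised.
 */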
static int
frmnet_send_cpkt_response(void *gr, void *buf, size_t len)
{
	struct f_rmnet		*dev;
	struct rmnet_ctrl_pkt	*cpkt;
	unsigned long		flags;

	if (!gr || !buf) {
		pr_err("%s: Invalid grmnet/buf, grmnet:%p buf:%p\n",
				__func__, gr, buf);
		return -ENODEV;
	}
	cpkt = rmnet_alloc_ctrl_pkt(len, GFP_ATOMIC);
	if (IS_ERR(cpkt)) {
		pr_err("%s: Unable to allocate ctrl pkt\n", __func__);
		return -ENOMEM;
	}
	memcpy(cpkt->buf, buf, len);
	cpkt->len = len;

	dev = port_to_rmnet(gr);

	pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);

	if (!atomic_read(&dev->online) || !atomic_read(&dev->ctrl_online)) {
		rmnet_free_ctrl_pkt(cpkt);
		return 0;
	}

	spin_lock_irqsave(&dev->lock, flags);
	list_add_tail(&cpkt->list, &dev->cpkt_resp_q);
	spin_unlock_irqrestore(&dev->lock, flags);

	frmnet_ctrl_response_available(dev);

	return 0;
}

static void
frmnet_cmd_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_rmnet			*dev = req->context;
	struct usb_composite_dev	*cdev;
	unsigned			port_num;

	if (!dev) {
		pr_err("%s: rmnet dev is null\n", __func__);
		return;
	}

	pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);

	cdev = dev->cdev;

	if (dev->port.send_encap_cmd) {
		port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
		dev->port.send_encap_cmd(port_num, req->buf, req->actual);
	}
}

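/*
 * Completion handler for the interrupt notify endpoint: requeue the request
 * while further notifications are outstanding, dropping the oldest pending
 * response if requeueing fails.
 */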
static void frmnet_notify_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_rmnet *dev = req->context;
	int status = req->status;
	unsigned long		flags;
	struct rmnet_ctrl_pkt	*cpkt;

	pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);

	switch (status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		atomic_set(&dev->notify_count, 0);
		break;
	default:
		pr_err("rmnet notify ep error %d\n", status);
		/* FALLTHROUGH */
	case 0:
		if (!atomic_read(&dev->ctrl_online))
			break;

		if (atomic_dec_and_test(&dev->notify_count))
			break;

		status = usb_ep_queue(dev->notify, req, GFP_ATOMIC);
		if (status) {
			spin_lock_irqsave(&dev->lock, flags);
			if (!list_empty(&dev->cpkt_resp_q)) {
				atomic_dec(&dev->notify_count);
				cpkt = list_first_entry(&dev->cpkt_resp_q,
						struct rmnet_ctrl_pkt, list);
				list_del(&cpkt->list);
				rmnet_free_ctrl_pkt(cpkt);
			}
			spin_unlock_irqrestore(&dev->lock, flags);
			pr_debug("ep enqueue error %d\n", status);
		}
		break;
	}
}

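/*
 * ep0 class-request handler for SEND_ENCAPSULATED_COMMAND,
 * GET_ENCAPSULATED_RESPONSE and SET_CONTROL_LINE_STATE.
 */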
static int
frmnet_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct f_rmnet			*dev = func_to_rmnet(f);
	struct usb_composite_dev	*cdev = dev->cdev;
	struct usb_request		*req = cdev->req;
	unsigned			port_num;
	u16				w_index = le16_to_cpu(ctrl->wIndex);
	u16				w_value = le16_to_cpu(ctrl->wValue);
	u16				w_length = le16_to_cpu(ctrl->wLength);
	int				ret = -EOPNOTSUPP;

	pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num);

	if (!atomic_read(&dev->online)) {
		pr_debug("%s: usb cable is not connected\n", __func__);
		return -ENOTCONN;
	}

	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
		ret = w_length;
		req->complete = frmnet_cmd_complete;
		req->context = dev;
		break;


	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
		if (w_value)
			goto invalid;
		else {
			unsigned len;
			struct rmnet_ctrl_pkt *cpkt;

			spin_lock(&dev->lock);
			if (list_empty(&dev->cpkt_resp_q)) {
				pr_err("ctrl resp queue empty "
					" req%02x.%02x v%04x i%04x l%d\n",
					ctrl->bRequestType, ctrl->bRequest,
					w_value, w_index, w_length);
				spin_unlock(&dev->lock);
				goto invalid;
			}

			cpkt = list_first_entry(&dev->cpkt_resp_q,
					struct rmnet_ctrl_pkt, list);
			list_del(&cpkt->list);
			spin_unlock(&dev->lock);

			len = min_t(unsigned, w_length, cpkt->len);
			memcpy(req->buf, cpkt->buf, len);
			ret = len;

			rmnet_free_ctrl_pkt(cpkt);
		}
		break;
	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
		if (dev->port.notify_modem) {
			port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
			dev->port.notify_modem(&dev->port, port_num, w_value);
		}
		ret = 0;

		break;
	default:

invalid:
		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	}

	/* respond with data transfer or status phase? */
	if (ret >= 0) {
		VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
		req->zero = (ret < w_length);
		req->length = ret;
		ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		if (ret < 0)
			ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret);
	}

	return ret;
}

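/*
 * Allocate the interface id, autoconfigure the bulk and interrupt endpoints,
 * and build the full/high/super speed descriptor sets for this function.
 */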
static int frmnet_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct f_rmnet			*dev = func_to_rmnet(f);
	struct usb_ep			*ep;
	struct usb_composite_dev	*cdev = c->cdev;
	int				ret = -ENODEV;

	dev->ifc_id = usb_interface_id(c, f);
	if (dev->ifc_id < 0) {
		pr_err("%s: unable to allocate ifc id, err:%d",
				__func__, dev->ifc_id);
		return dev->ifc_id;
	}
	rmnet_interface_desc.bInterfaceNumber = dev->ifc_id;

	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_in_desc);
	if (!ep) {
		pr_err("%s: usb epin autoconfig failed\n", __func__);
		return -ENODEV;
	}
	dev->port.in = ep;
	ep->driver_data = cdev;

	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_out_desc);
	if (!ep) {
		pr_err("%s: usb epout autoconfig failed\n", __func__);
		ret = -ENODEV;
		goto ep_auto_out_fail;
	}
	dev->port.out = ep;
	ep->driver_data = cdev;

	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_notify_desc);
	if (!ep) {
		pr_err("%s: usb epnotify autoconfig failed\n", __func__);
		ret = -ENODEV;
		goto ep_auto_notify_fail;
	}
	dev->notify = ep;
	ep->driver_data = cdev;

	dev->notify_req = frmnet_alloc_req(ep,
				sizeof(struct usb_cdc_notification),
				GFP_KERNEL);
	if (IS_ERR(dev->notify_req)) {
		pr_err("%s: unable to allocate memory for notify req\n",
				__func__);
		ret = -ENOMEM;
		goto ep_notify_alloc_fail;
	}

	dev->notify_req->complete = frmnet_notify_complete;
	dev->notify_req->context = dev;

	ret = -ENOMEM;
	f->descriptors = usb_copy_descriptors(rmnet_fs_function);

	if (!f->descriptors)
		goto fail;

	if (gadget_is_dualspeed(cdev->gadget)) {
		rmnet_hs_in_desc.bEndpointAddress =
				rmnet_fs_in_desc.bEndpointAddress;
		rmnet_hs_out_desc.bEndpointAddress =
				rmnet_fs_out_desc.bEndpointAddress;
		rmnet_hs_notify_desc.bEndpointAddress =
				rmnet_fs_notify_desc.bEndpointAddress;

		/* copy descriptors, and track endpoint copies */
		f->hs_descriptors = usb_copy_descriptors(rmnet_hs_function);

		if (!f->hs_descriptors)
			goto fail;
	}

	if (gadget_is_superspeed(cdev->gadget)) {
		rmnet_ss_in_desc.bEndpointAddress =
				rmnet_fs_in_desc.bEndpointAddress;
		rmnet_ss_out_desc.bEndpointAddress =
				rmnet_fs_out_desc.bEndpointAddress;
		rmnet_ss_notify_desc.bEndpointAddress =
				rmnet_fs_notify_desc.bEndpointAddress;

		/* copy descriptors, and track endpoint copies */
		f->ss_descriptors = usb_copy_descriptors(rmnet_ss_function);

		if (!f->ss_descriptors)
			goto fail;
	}

	pr_debug("%s: RmNet(%d) %s Speed, IN:%s OUT:%s\n",
			__func__, dev->port_num,
			gadget_is_dualspeed(cdev->gadget) ? "dual" : "full",
			dev->port.in->name, dev->port.out->name);

	return 0;

fail:
	if (f->ss_descriptors)
		usb_free_descriptors(f->ss_descriptors);
	if (f->hs_descriptors)
		usb_free_descriptors(f->hs_descriptors);
	if (f->descriptors)
		usb_free_descriptors(f->descriptors);
	if (dev->notify_req)
		frmnet_free_req(dev->notify, dev->notify_req);
ep_notify_alloc_fail:
	dev->notify->driver_data = NULL;
	dev->notify = NULL;
ep_auto_notify_fail:
	dev->port.out->driver_data = NULL;
	dev->port.out = NULL;
ep_auto_out_fail:
	dev->port.in->driver_data = NULL;
	dev->port.in = NULL;

	return ret;
}

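/*
 * Register one rmnet USB function instance with the given configuration and
 * wire up its bind/setup/suspend/resume callbacks.
 */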
static int frmnet_bind_config(struct usb_configuration *c, unsigned portno)
{
	int			status;
	struct f_rmnet		*dev;
	struct usb_function	*f;
	unsigned long		flags;

	pr_debug("%s: usb config:%p\n", __func__, c);

	if (portno >= nr_rmnet_ports) {
		pr_err("%s: supporting ports#%u port_id:%u", __func__,
				nr_rmnet_ports, portno);
		return -ENODEV;
	}

	if (rmnet_string_defs[0].id == 0) {
		status = usb_string_id(c->cdev);
		if (status < 0) {
			pr_err("%s: failed to get string id, err:%d\n",
					__func__, status);
			return status;
		}
		rmnet_string_defs[0].id = status;
	}

	dev = rmnet_ports[portno].port;

	spin_lock_irqsave(&dev->lock, flags);
	dev->cdev = c->cdev;
	f = &dev->port.func;
	f->name = kasprintf(GFP_ATOMIC, "rmnet%d", portno);
	spin_unlock_irqrestore(&dev->lock, flags);
	if (!f->name) {
		pr_err("%s: cannot allocate memory for name\n", __func__);
		return -ENOMEM;
	}

	f->strings = rmnet_strings;
	f->bind = frmnet_bind;
	f->unbind = frmnet_unbind;
	f->disable = frmnet_disable;
	f->set_alt = frmnet_set_alt;
	f->setup = frmnet_setup;
	f->suspend = frmnet_suspend;
	f->resume = frmnet_resume;
	dev->port.send_cpkt_response = frmnet_send_cpkt_response;
	dev->port.disconnect = frmnet_disconnect;
	dev->port.connect = frmnet_connect;

	status = usb_add_function(c, f);
	if (status) {
		pr_err("%s: usb add function failed: %d\n",
				__func__, status);
		kfree(f->name);
		return status;
	}

	pr_debug("%s: complete\n", __func__);

	return status;
}

static void frmnet_cleanup(void)
{
	int i;

	for (i = 0; i < nr_rmnet_ports; i++)
		kfree(rmnet_ports[i].port);

	gbam_cleanup();
	nr_rmnet_ports = 0;
	no_ctrl_smd_ports = 0;
	no_ctrl_qti_ports = 0;
	no_data_bam_ports = 0;
	no_data_bam2bam_ports = 0;
	no_ctrl_hsic_ports = 0;
	no_data_hsic_ports = 0;
	no_ctrl_hsuart_ports = 0;
	no_data_hsuart_ports = 0;
}

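/*
 * Parse the control/data transport names for one rmnet instance, allocate its
 * struct f_rmnet and bump the per-transport port counters that are consumed
 * later by rmnet_gport_setup().
 */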
static int frmnet_init_port(const char *ctrl_name, const char *data_name,
		const char *port_name)
{
	struct f_rmnet			*dev;
	struct rmnet_ports		*rmnet_port;
	int				ret;
	int				i;

	if (nr_rmnet_ports >= NR_RMNET_PORTS) {
		pr_err("%s: Max-%d instances supported\n",
				__func__, NR_RMNET_PORTS);
		return -EINVAL;
	}

	pr_debug("%s: port#:%d, ctrl port: %s data port: %s\n",
		__func__, nr_rmnet_ports, ctrl_name, data_name);

	dev = kzalloc(sizeof(struct f_rmnet), GFP_KERNEL);
	if (!dev) {
		pr_err("%s: Unable to allocate rmnet device\n", __func__);
		return -ENOMEM;
	}

	dev->port_num = nr_rmnet_ports;
	spin_lock_init(&dev->lock);
	INIT_LIST_HEAD(&dev->cpkt_resp_q);

	rmnet_port = &rmnet_ports[nr_rmnet_ports];
	rmnet_port->port = dev;
	rmnet_port->port_num = nr_rmnet_ports;
	rmnet_port->ctrl_xport = str_to_xport(ctrl_name);
	rmnet_port->data_xport = str_to_xport(data_name);

	switch (rmnet_port->ctrl_xport) {
	case USB_GADGET_XPORT_SMD:
		rmnet_port->ctrl_xport_num = no_ctrl_smd_ports;
		no_ctrl_smd_ports++;
		break;
	case USB_GADGET_XPORT_QTI:
		rmnet_port->ctrl_xport_num = no_ctrl_qti_ports;
		no_ctrl_qti_ports++;
		break;
	case USB_GADGET_XPORT_HSIC:
		ghsic_ctrl_set_port_name(port_name, ctrl_name);
		rmnet_port->ctrl_xport_num = no_ctrl_hsic_ports;
		no_ctrl_hsic_ports++;
		break;
	case USB_GADGET_XPORT_HSUART:
		rmnet_port->ctrl_xport_num = no_ctrl_hsuart_ports;
		no_ctrl_hsuart_ports++;
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %u\n", __func__,
				rmnet_port->ctrl_xport);
		ret = -ENODEV;
		goto fail_probe;
	}

	switch (rmnet_port->data_xport) {
	case USB_GADGET_XPORT_BAM:
		rmnet_port->data_xport_num = no_data_bam_ports;
		no_data_bam_ports++;
		break;
	case USB_GADGET_XPORT_BAM2BAM:
	case USB_GADGET_XPORT_BAM2BAM_IPA:
		rmnet_port->data_xport_num = no_data_bam2bam_ports;
		no_data_bam2bam_ports++;
		break;
	case USB_GADGET_XPORT_HSIC:
		ghsic_data_set_port_name(port_name, data_name);
		rmnet_port->data_xport_num = no_data_hsic_ports;
		no_data_hsic_ports++;
		break;
	case USB_GADGET_XPORT_HSUART:
		rmnet_port->data_xport_num = no_data_hsuart_ports;
		no_data_hsuart_ports++;
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %u\n", __func__,
				rmnet_port->data_xport);
		ret = -ENODEV;
		goto fail_probe;
	}
	nr_rmnet_ports++;

	return 0;

fail_probe:
	for (i = 0; i < nr_rmnet_ports; i++)
		kfree(rmnet_ports[i].port);

	nr_rmnet_ports = 0;
	no_ctrl_smd_ports = 0;
	no_ctrl_qti_ports = 0;
	no_data_bam_ports = 0;
	no_ctrl_hsic_ports = 0;
	no_data_hsic_ports = 0;
	no_ctrl_hsuart_ports = 0;
	no_data_hsuart_ports = 0;

	return ret;
}