/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
| 12 | |
| 13 | #include <linux/kernel.h> |
| 14 | #include <linux/interrupt.h> |
| 15 | #include <linux/device.h> |
| 16 | #include <linux/delay.h> |
| 17 | #include <linux/slab.h> |
| 18 | #include <linux/termios.h> |
| 19 | #include <linux/netdevice.h> |
| 20 | #include <linux/debugfs.h> |
| 21 | #include <linux/bitops.h> |
| 22 | #include <linux/termios.h> |
| 23 | #include <linux/usb_bam.h> |
| 24 | |
| 25 | #include "u_data_ipa.h" |
| 26 | #include "u_rmnet.h" |
| 27 | |
/*
 * struct ipa_data_ch_info - per-port state for one USB IPA accelerated
 * data channel (RNDIS, RMNET or DPL).
 */
struct ipa_data_ch_info {
	struct usb_request *rx_req;	/* endless OUT (host->device) request */
	struct usb_request *tx_req;	/* endless IN (device->host) request */
	unsigned long flags;
	unsigned int id;
	enum ipa_func_type func_type;	/* which function owns this port */
	bool is_connected;	/* set by connect work, cleared by disconnect work */
	unsigned int port_num;
	spinlock_t port_lock;	/* guards port_usb, rx_req/tx_req, is_connected */

	/* work items executed on ipa_data_wq */
	struct work_struct connect_w;
	struct work_struct disconnect_w;
	struct work_struct suspend_w;
	struct work_struct resume_w;

	u32 src_pipe_idx;	/* USB BAM pipe index, OUT/producer side */
	u32 dst_pipe_idx;	/* USB BAM pipe index, IN/consumer side */
	u8 src_connection_idx;
	u8 dst_connection_idx;
	enum usb_ctrl usb_bam_type;
	struct gadget_ipa_port *port_usb;	/* NULL while cable is disconnected */
	struct usb_gadget *gadget;
	/* RNDIS only: 1 after rndis_ipa_pipe_connect_notify() succeeded */
	atomic_t pipe_connect_notified;
	struct usb_bam_connect_ipa_params ipa_params;
};
| 53 | |
/*
 * struct rndis_data_ch_info - RNDIS-specific IPA pipe configuration,
 * handed to rndis_ipa_pipe_connect_notify() once both pipes are up.
 */
struct rndis_data_ch_info {
	/* this provides downlink (device->host i.e host) side configuration*/
	u32 dl_max_transfer_size;
	/* this provides uplink (host->device i.e device) side configuration */
	u32 ul_max_transfer_size;
	u32 ul_max_packets_number;
	bool ul_aggregation_enable;
	u32 prod_clnt_hdl;	/* IPA producer client handle from connect */
	u32 cons_clnt_hdl;	/* IPA consumer client handle from connect */
	void *priv;		/* opaque context from rndis_qc_get_ipa_priv() */
};
| 65 | |
/* Workqueue running the connect/disconnect/suspend/resume work items. */
static struct workqueue_struct *ipa_data_wq;
/* One channel per accelerated function, indexed by enum ipa_func_type. */
struct ipa_data_ch_info *ipa_data_ports[IPA_N_PORTS];
/* RNDIS pipe configuration; presumably allocated at init — not visible here. */
static struct rndis_data_ch_info *rndis_data;
/**
 * ipa_data_endless_complete() - completion callback for endless TX/RX request
 * @ep: USB endpoint for which this completion happen
 * @req: USB endless request
 *
 * This completion is being called when endless (TX/RX) transfer is terminated
 * i.e. disconnect or suspend case.
 */
static void ipa_data_endless_complete(struct usb_ep *ep,
					struct usb_request *req)
{
	/* Nothing to requeue or free: the request stays owned by the port. */
	pr_debug("%s: endless complete for(%s) with status: %d\n",
				__func__, ep->name, req->status);
}
| 83 | |
| 84 | /** |
| 85 | * ipa_data_start_endless_xfer() - configure USB endpoint and |
| 86 | * queue endless TX/RX request |
| 87 | * @port: USB IPA data channel information |
| 88 | * @in: USB endpoint direction i.e. true: IN(Device TX), false: OUT(Device RX) |
| 89 | * |
| 90 | * It is being used to queue endless TX/RX request with UDC driver. |
| 91 | * It does set required DBM endpoint configuration before queueing endless |
| 92 | * TX/RX request. |
| 93 | */ |
| 94 | static void ipa_data_start_endless_xfer(struct ipa_data_ch_info *port, bool in) |
| 95 | { |
| 96 | unsigned long flags; |
| 97 | int status; |
| 98 | struct usb_ep *ep; |
| 99 | |
| 100 | spin_lock_irqsave(&port->port_lock, flags); |
| 101 | if (!port->port_usb || (in && !port->tx_req) |
| 102 | || (!in && !port->rx_req)) { |
| 103 | spin_unlock_irqrestore(&port->port_lock, flags); |
| 104 | pr_err("%s(): port_usb/req is NULL.\n", __func__); |
| 105 | return; |
| 106 | } |
| 107 | |
| 108 | if (in) |
| 109 | ep = port->port_usb->in; |
| 110 | else |
| 111 | ep = port->port_usb->out; |
| 112 | |
| 113 | spin_unlock_irqrestore(&port->port_lock, flags); |
| 114 | |
| 115 | if (in) { |
| 116 | pr_debug("%s: enqueue endless TX_REQ(IN)\n", __func__); |
| 117 | status = usb_ep_queue(ep, port->tx_req, GFP_ATOMIC); |
| 118 | if (status) |
| 119 | pr_err("error enqueuing endless TX_REQ, %d\n", status); |
| 120 | } else { |
| 121 | pr_debug("%s: enqueue endless RX_REQ(OUT)\n", __func__); |
| 122 | status = usb_ep_queue(ep, port->rx_req, GFP_ATOMIC); |
| 123 | if (status) |
| 124 | pr_err("error enqueuing endless RX_REQ, %d\n", status); |
| 125 | } |
| 126 | } |
| 127 | |
| 128 | /** |
| 129 | * ipa_data_stop_endless_xfer() - terminate and dequeue endless TX/RX request |
| 130 | * @port: USB IPA data channel information |
| 131 | * @in: USB endpoint direction i.e. IN - Device TX, OUT - Device RX |
| 132 | * |
| 133 | * It is being used to terminate and dequeue endless TX/RX request with UDC |
| 134 | * driver. |
| 135 | */ |
| 136 | static void ipa_data_stop_endless_xfer(struct ipa_data_ch_info *port, bool in) |
| 137 | { |
| 138 | unsigned long flags; |
| 139 | int status; |
| 140 | struct usb_ep *ep; |
| 141 | |
| 142 | spin_lock_irqsave(&port->port_lock, flags); |
| 143 | if (!port->port_usb || (in && !port->tx_req) |
| 144 | || (!in && !port->rx_req)) { |
| 145 | spin_unlock_irqrestore(&port->port_lock, flags); |
| 146 | pr_err("%s(): port_usb/req is NULL.\n", __func__); |
| 147 | return; |
| 148 | } |
| 149 | |
| 150 | if (in) |
| 151 | ep = port->port_usb->in; |
| 152 | else |
| 153 | ep = port->port_usb->out; |
| 154 | |
| 155 | spin_unlock_irqrestore(&port->port_lock, flags); |
| 156 | |
| 157 | if (in) { |
| 158 | pr_debug("%s: dequeue endless TX_REQ(IN)\n", __func__); |
| 159 | status = usb_ep_dequeue(ep, port->tx_req); |
| 160 | if (status) |
| 161 | pr_err("error dequeueing endless TX_REQ, %d\n", status); |
| 162 | } else { |
| 163 | pr_debug("%s: dequeue endless RX_REQ(OUT)\n", __func__); |
| 164 | status = usb_ep_dequeue(ep, port->rx_req); |
| 165 | if (status) |
| 166 | pr_err("error dequeueing endless RX_REQ, %d\n", status); |
| 167 | } |
| 168 | } |
| 169 | |
| 170 | /* |
| 171 | * Called when IPA triggers us that the network interface is up. |
| 172 | * Starts the transfers on bulk endpoints. |
| 173 | * (optimization reasons, the pipes and bam with IPA are already connected) |
| 174 | */ |
| 175 | void ipa_data_start_rx_tx(enum ipa_func_type func) |
| 176 | { |
| 177 | struct ipa_data_ch_info *port; |
| 178 | unsigned long flags; |
| 179 | struct usb_ep *epin, *epout; |
| 180 | |
| 181 | pr_debug("%s: Triggered: starting tx, rx", __func__); |
| 182 | /* queue in & out requests */ |
| 183 | port = ipa_data_ports[func]; |
| 184 | if (!port) { |
| 185 | pr_err("%s: port is NULL, can't start tx, rx", __func__); |
| 186 | return; |
| 187 | } |
| 188 | |
| 189 | spin_lock_irqsave(&port->port_lock, flags); |
| 190 | |
| 191 | if (!port->port_usb || !port->port_usb->in || |
| 192 | !port->port_usb->out) { |
| 193 | pr_err("%s: Can't start tx, rx, ep not enabled", __func__); |
| 194 | spin_unlock_irqrestore(&port->port_lock, flags); |
| 195 | return; |
| 196 | } |
| 197 | |
| 198 | if (!port->rx_req || !port->tx_req) { |
| 199 | pr_err("%s: No request d->rx_req=%pK, d->tx_req=%pK", __func__, |
| 200 | port->rx_req, port->tx_req); |
| 201 | spin_unlock_irqrestore(&port->port_lock, flags); |
| 202 | return; |
| 203 | } |
| 204 | if (!port->is_connected) { |
| 205 | pr_debug("%s: pipes are disconnected", __func__); |
| 206 | spin_unlock_irqrestore(&port->port_lock, flags); |
| 207 | return; |
| 208 | } |
| 209 | |
| 210 | epout = port->port_usb->out; |
| 211 | epin = port->port_usb->in; |
| 212 | spin_unlock_irqrestore(&port->port_lock, flags); |
| 213 | |
| 214 | /* queue in & out requests */ |
| 215 | pr_debug("%s: Starting rx", __func__); |
| 216 | if (epout) |
| 217 | ipa_data_start_endless_xfer(port, false); |
| 218 | |
| 219 | pr_debug("%s: Starting tx", __func__); |
| 220 | if (epin) |
| 221 | ipa_data_start_endless_xfer(port, true); |
| 222 | } |
/**
 * ipa_data_disconnect_work() - Perform USB IPA BAM disconnect
 * @w: disconnect work
 *
 * It is being scheduled from ipa_data_disconnect() API when a particular
 * function is being disabled due to USB disconnect or USB composition switch
 * being triggered. This API performs disconnect of USB BAM pipe, IPA BAM pipe
 * and also initiates USB IPA BAM pipe handshake for the USB disconnect
 * sequence. Due to the handshake operation and involvement of SPS related
 * APIs, this functionality can't be used from atomic context.
 */
static void ipa_data_disconnect_work(struct work_struct *w)
{
	struct ipa_data_ch_info *port = container_of(w, struct ipa_data_ch_info,
			disconnect_w);
	unsigned long flags;
	int ret;

	/* is_connected is the idempotence guard against double disconnect */
	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->is_connected) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		pr_debug("Already disconnected.\n");
		return;
	}
	port->is_connected = false;
	pr_debug("%s(): prod_clnt_hdl:%d cons_clnt_hdl:%d\n", __func__,
			port->ipa_params.prod_clnt_hdl,
			port->ipa_params.cons_clnt_hdl);

	/* lock dropped: usb_bam_disconnect_ipa() may sleep */
	spin_unlock_irqrestore(&port->port_lock, flags);
	ret = usb_bam_disconnect_ipa(port->usb_bam_type, &port->ipa_params);
	if (ret)
		pr_err("usb_bam_disconnect_ipa failed: err:%d\n", ret);

	if (port->func_type == USB_IPA_FUNC_RNDIS) {
		/*
		 * NOTE: it is required to disconnect USB and IPA BAM related
		 * pipes before calling IPA tethered function related disconnect
		 * API. IPA tethered function related disconnect API deletes
		 * dependency graph with IPA RM which would result in IPA not
		 * pulling data although there is pending data on USB BAM
		 * producer pipe.
		 */
		if (atomic_xchg(&port->pipe_connect_notified, 0) == 1) {
			void *priv;

			priv = rndis_qc_get_ipa_priv();
			rndis_ipa_pipe_disconnect_notify(priv);
		}
	}

	/* Only free FIFOs for pipes that actually got connected. */
	if (port->ipa_params.prod_clnt_hdl)
		usb_bam_free_fifos(port->usb_bam_type,
				port->dst_connection_idx);
	if (port->ipa_params.cons_clnt_hdl)
		usb_bam_free_fifos(port->usb_bam_type,
				port->src_connection_idx);

	if (port->func_type == USB_IPA_FUNC_RMNET)
		teth_bridge_disconnect(port->ipa_params.src_client);
	/*
	 * Decrement usage count which was incremented
	 * upon cable connect or cable disconnect in suspended state.
	 */
	usb_gadget_autopm_put_async(port->gadget);

	pr_debug("%s(): disconnect work completed.\n", __func__);
}
| 291 | |
/**
 * ipa_data_disconnect() - Restore USB ep operation and disable USB endpoint
 * @gp: USB gadget IPA Port
 * @func: IPA function whose port needs to be disabled
 *
 * It is being called from atomic context from gadget driver when particular
 * function is being disable due to USB cable disconnect or USB composition
 * switch is being trigger. This API performs restoring USB endpoint operation
 * and disable USB endpoint used for accelerated path. The BAM/IPA teardown
 * itself is deferred to disconnect_w since it cannot run in atomic context.
 */
void ipa_data_disconnect(struct gadget_ipa_port *gp, enum ipa_func_type func)
{
	struct ipa_data_ch_info *port;
	unsigned long flags;
	struct usb_gadget *gadget = NULL;

	pr_debug("dev:%pK port number:%d\n", gp, func);
	if (func >= USB_IPA_NUM_FUNCS) {
		pr_err("invalid ipa portno#%d\n", func);
		return;
	}

	if (!gp) {
		pr_err("data port is null\n");
		return;
	}

	port = ipa_data_ports[func];
	if (!port) {
		pr_err("port %u is NULL", func);
		return;
	}

	spin_lock_irqsave(&port->port_lock, flags);
	if (port->port_usb) {
		gadget = port->port_usb->cdev->gadget;
		/* signal to the control side that the IPA eps are gone */
		port->port_usb->ipa_consumer_ep = -1;
		port->port_usb->ipa_producer_ep = -1;

		if (port->port_usb->in) {
			/*
			 * Disable endpoints.
			 * Unlocking is needed since disabling the eps might
			 * stop active transfers and therefore the request
			 * complete function will be called, where we try
			 * to obtain the spinlock as well.
			 */
			msm_ep_unconfig(port->port_usb->in);
			spin_unlock_irqrestore(&port->port_lock, flags);
			usb_ep_disable(port->port_usb->in);
			spin_lock_irqsave(&port->port_lock, flags);
			/* re-check: tx_req may have been freed while unlocked */
			if (port->tx_req) {
				usb_ep_free_request(port->port_usb->in,
						port->tx_req);
				port->tx_req = NULL;
			}
			port->port_usb->in->endless = false;
		}

		if (port->port_usb->out) {
			/* same unlock-disable-relock dance as the IN side */
			msm_ep_unconfig(port->port_usb->out);
			spin_unlock_irqrestore(&port->port_lock, flags);
			usb_ep_disable(port->port_usb->out);
			spin_lock_irqsave(&port->port_lock, flags);
			if (port->rx_req) {
				usb_ep_free_request(port->port_usb->out,
						port->rx_req);
				port->rx_req = NULL;
			}
			port->port_usb->out->endless = false;
		}

		port->port_usb = NULL;
	}
	spin_unlock_irqrestore(&port->port_lock, flags);
	/* defer the sleeping BAM/IPA teardown to the workqueue */
	queue_work(ipa_data_wq, &port->disconnect_w);
}
| 369 | |
| 370 | /** |
| 371 | * configure_fifo() - Configure USB BAM Pipe's data FIFO |
| 372 | * @idx: USB BAM Pipe index |
| 373 | * @ep: USB endpoint |
| 374 | * |
| 375 | * This function configures USB BAM data fifo using fetched pipe configuraion |
| 376 | * using provided index value. This function needs to used before starting |
| 377 | * endless transfer. |
| 378 | */ |
| 379 | static void configure_fifo(enum usb_ctrl bam_type, u8 idx, struct usb_ep *ep) |
| 380 | { |
| 381 | struct sps_mem_buffer data_fifo = {0}; |
| 382 | u32 usb_bam_pipe_idx; |
| 383 | |
| 384 | get_bam2bam_connection_info(bam_type, idx, |
| 385 | &usb_bam_pipe_idx, |
| 386 | NULL, &data_fifo, NULL); |
| 387 | msm_data_fifo_config(ep, data_fifo.phys_base, data_fifo.size, |
| 388 | usb_bam_pipe_idx); |
| 389 | } |
| 390 | |
/**
 * ipa_data_connect_work() - Perform USB IPA BAM connect
 * @w: connect work
 *
 * It is being schedule from ipa_data_connect() API when particular function
 * which is using USB IPA accelerated path. This API performs allocating request
 * for USB endpoint (tx/rx) for endless purpose, configure USB endpoint to be
 * used in accelerated path, connect of USB BAM pipe, IPA BAM pipe and also
 * initiate USB IPA BAM pipe handshake for connect sequence.
 *
 * Locking: port_lock is dropped around every call that may sleep
 * (usb_bam_alloc_fifos, usb_bam_connect_ipa, ...) and port->port_usb is
 * re-checked after each re-acquire, since a cable disconnect may race in.
 * Error labels unwind in reverse order of the setup phases.
 */
static void ipa_data_connect_work(struct work_struct *w)
{
	struct ipa_data_ch_info *port = container_of(w, struct ipa_data_ch_info,
						connect_w);
	struct gadget_ipa_port *gport;
	struct usb_gadget *gadget = NULL;
	struct teth_bridge_connect_params connect_params;
	struct teth_bridge_init_params teth_bridge_params;
	u32 sps_params;
	int ret;
	unsigned long flags;
	/* tracks whether the BAM/IPA pipes still need disconnect on error */
	bool is_ipa_disconnected = true;

	pr_debug("%s: Connect workqueue started\n", __func__);

	spin_lock_irqsave(&port->port_lock, flags);

	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		/* release the pm reference taken in ipa_data_connect() */
		usb_gadget_autopm_put_async(port->gadget);
		pr_err("%s(): port_usb is NULL.\n", __func__);
		return;
	}

	gport = port->port_usb;
	if (gport && gport->cdev)
		gadget = gport->cdev->gadget;

	if (!gadget) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		usb_gadget_autopm_put_async(port->gadget);
		pr_err("%s: gport is NULL.\n", __func__);
		return;
	}

	/*
	 * check if connect_w got called two times during RNDIS resume as
	 * explicit flow control is called to start data transfers after
	 * ipa_data_connect()
	 */
	if (port->is_connected) {
		pr_debug("IPA connect is already done & Transfers started\n");
		spin_unlock_irqrestore(&port->port_lock, flags);
		usb_gadget_autopm_put_async(port->gadget);
		return;
	}

	gport->ipa_consumer_ep = -1;
	gport->ipa_producer_ep = -1;

	port->is_connected = true;

	/* update IPA Parameteres here. */
	port->ipa_params.usb_connection_speed = gadget->speed;
	port->ipa_params.reset_pipe_after_lpm =
			msm_dwc3_reset_ep_after_lpm(gadget);
	port->ipa_params.skip_ep_cfg = true;
	port->ipa_params.keep_ipa_awake = true;
	port->ipa_params.cons_clnt_hdl = -1;
	port->ipa_params.prod_clnt_hdl = -1;

	/* Phase 1: allocate FIFOs and configure the OUT (RX) endpoint. */
	if (gport->out) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		usb_bam_alloc_fifos(port->usb_bam_type,
				port->src_connection_idx);
		spin_lock_irqsave(&port->port_lock, flags);
		/* cable may have been pulled while the lock was dropped */
		if (!port->port_usb || port->rx_req == NULL) {
			spin_unlock_irqrestore(&port->port_lock, flags);
			pr_err("%s: port_usb is NULL, or rx_req cleaned\n",
					__func__);
			goto out;
		}

		sps_params = MSM_SPS_MODE | MSM_DISABLE_WB
				| MSM_PRODUCER | port->src_pipe_idx;
		port->rx_req->length = 32*1024;
		port->rx_req->udc_priv = sps_params;
		configure_fifo(port->usb_bam_type,
				port->src_connection_idx,
				port->port_usb->out);
		ret = msm_ep_config(gport->out);
		if (ret) {
			pr_err("msm_ep_config() failed for OUT EP\n");
			spin_unlock_irqrestore(&port->port_lock, flags);
			goto out;
		}
	}

	/* Phase 2: allocate FIFOs and configure the IN (TX) endpoint. */
	if (gport->in) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		usb_bam_alloc_fifos(port->usb_bam_type,
				port->dst_connection_idx);
		spin_lock_irqsave(&port->port_lock, flags);
		if (!port->port_usb || port->tx_req == NULL) {
			spin_unlock_irqrestore(&port->port_lock, flags);
			pr_err("%s: port_usb is NULL, or tx_req cleaned\n",
					__func__);
			goto unconfig_msm_ep_out;
		}
		sps_params = MSM_SPS_MODE | MSM_DISABLE_WB |
					port->dst_pipe_idx;
		port->tx_req->length = 32*1024;
		port->tx_req->udc_priv = sps_params;
		configure_fifo(port->usb_bam_type,
				port->dst_connection_idx, gport->in);
		ret = msm_ep_config(gport->in);
		if (ret) {
			pr_err("msm_ep_config() failed for IN EP\n");
			spin_unlock_irqrestore(&port->port_lock, flags);
			goto unconfig_msm_ep_out;
		}
	}

	/* Phase 3: RMNET needs the tethering bridge initialized first. */
	if (port->func_type == USB_IPA_FUNC_RMNET) {
		teth_bridge_params.client = port->ipa_params.src_client;
		ret = teth_bridge_init(&teth_bridge_params);
		if (ret) {
			pr_err("%s:teth_bridge_init() failed\n", __func__);
			spin_unlock_irqrestore(&port->port_lock, flags);
			goto unconfig_msm_ep_in;
		}
	}

	/*
	 * Perform below operations for Tx from Device (OUT transfer)
	 * 1. Connect with pipe of USB BAM with IPA BAM pipe
	 * 2. Update USB Endpoint related information using SPS Param.
	 * 3. Configure USB Endpoint/DBM for the same.
	 * 4. Override USB ep queue functionality for endless transfer.
	 */
	if (gport->out) {
		pr_debug("configure bam ipa connect for USB OUT\n");
		port->ipa_params.dir = USB_TO_PEER_PERIPHERAL;

		if (port->func_type == USB_IPA_FUNC_RNDIS) {
			port->ipa_params.notify = rndis_qc_get_ipa_rx_cb();
			port->ipa_params.priv = rndis_qc_get_ipa_priv();
			port->ipa_params.skip_ep_cfg =
				rndis_qc_get_skip_ep_config();
		} else if (port->func_type == USB_IPA_FUNC_RMNET) {
			port->ipa_params.notify =
				teth_bridge_params.usb_notify_cb;
			port->ipa_params.priv =
				teth_bridge_params.private_data;
			port->ipa_params.reset_pipe_after_lpm =
				msm_dwc3_reset_ep_after_lpm(gadget);
			port->ipa_params.ipa_ep_cfg.mode.mode = IPA_BASIC;
			port->ipa_params.skip_ep_cfg =
				teth_bridge_params.skip_ep_cfg;
		}

		spin_unlock_irqrestore(&port->port_lock, flags);
		ret = usb_bam_connect_ipa(port->usb_bam_type,
						&port->ipa_params);
		if (ret) {
			pr_err("usb_bam_connect_ipa out failed err:%d\n", ret);
			goto disconnect_usb_bam_ipa_out;
		}
		spin_lock_irqsave(&port->port_lock, flags);
		is_ipa_disconnected = false;
		/* check if USB cable is disconnected or not */
		if (!port->port_usb) {
			pr_debug("%s:%d: cable is disconnected.\n",
					__func__, __LINE__);
			spin_unlock_irqrestore(&port->port_lock, flags);
			goto disconnect_usb_bam_ipa_out;
		}

		gport->ipa_consumer_ep = port->ipa_params.ipa_cons_ep_idx;
	}

	/* Same sequence for the IN (device->host) direction. */
	if (gport->in) {
		pr_debug("configure bam ipa connect for USB IN\n");
		port->ipa_params.dir = PEER_PERIPHERAL_TO_USB;

		if (port->func_type == USB_IPA_FUNC_RNDIS) {
			port->ipa_params.notify = rndis_qc_get_ipa_tx_cb();
			port->ipa_params.priv = rndis_qc_get_ipa_priv();
			port->ipa_params.skip_ep_cfg =
				rndis_qc_get_skip_ep_config();
		} else if (port->func_type == USB_IPA_FUNC_RMNET) {
			port->ipa_params.notify =
				teth_bridge_params.usb_notify_cb;
			port->ipa_params.priv =
				teth_bridge_params.private_data;
			port->ipa_params.reset_pipe_after_lpm =
				msm_dwc3_reset_ep_after_lpm(gadget);
			port->ipa_params.ipa_ep_cfg.mode.mode = IPA_BASIC;
			port->ipa_params.skip_ep_cfg =
				teth_bridge_params.skip_ep_cfg;
		}

		if (port->func_type == USB_IPA_FUNC_DPL)
			port->ipa_params.dst_client = IPA_CLIENT_USB_DPL_CONS;
		spin_unlock_irqrestore(&port->port_lock, flags);
		ret = usb_bam_connect_ipa(port->usb_bam_type,
						&port->ipa_params);
		if (ret) {
			pr_err("usb_bam_connect_ipa IN failed err:%d\n", ret);
			goto disconnect_usb_bam_ipa_out;
		}
		spin_lock_irqsave(&port->port_lock, flags);
		is_ipa_disconnected = false;
		/* check if USB cable is disconnected or not */
		if (!port->port_usb) {
			pr_debug("%s:%d: cable is disconnected.\n",
					__func__, __LINE__);
			spin_unlock_irqrestore(&port->port_lock, flags);
			goto disconnect_usb_bam_ipa_out;
		}

		gport->ipa_producer_ep = port->ipa_params.ipa_prod_ep_idx;
	}

	/* Phase 4: notify the function driver that the pipes are up. */
	spin_unlock_irqrestore(&port->port_lock, flags);
	if (port->func_type == USB_IPA_FUNC_RNDIS) {
		rndis_data->prod_clnt_hdl =
			port->ipa_params.prod_clnt_hdl;
		rndis_data->cons_clnt_hdl =
			port->ipa_params.cons_clnt_hdl;
		rndis_data->priv = port->ipa_params.priv;

		pr_debug("ul_max_transfer_size:%d\n",
				rndis_data->ul_max_transfer_size);
		pr_debug("ul_max_packets_number:%d\n",
				rndis_data->ul_max_packets_number);
		pr_debug("dl_max_transfer_size:%d\n",
				rndis_data->dl_max_transfer_size);

		ret = rndis_ipa_pipe_connect_notify(
				rndis_data->cons_clnt_hdl,
				rndis_data->prod_clnt_hdl,
				rndis_data->ul_max_transfer_size,
				rndis_data->ul_max_packets_number,
				rndis_data->dl_max_transfer_size,
				rndis_data->priv);
		if (ret) {
			pr_err("%s: failed to connect IPA: err:%d\n",
				__func__, ret);
			return;
		}
		atomic_set(&port->pipe_connect_notified, 1);
	} else if (port->func_type == USB_IPA_FUNC_RMNET ||
			port->func_type == USB_IPA_FUNC_DPL) {
		/* For RmNet and DPL need to update_ipa_pipes to qti */
		enum qti_port_type qti_port_type = port->func_type ==
			USB_IPA_FUNC_RMNET ? QTI_PORT_RMNET : QTI_PORT_DPL;
		gqti_ctrl_update_ipa_pipes(port->port_usb, qti_port_type,
			gport->ipa_producer_ep, gport->ipa_consumer_ep);
	}

	if (port->func_type == USB_IPA_FUNC_RMNET) {
		connect_params.ipa_usb_pipe_hdl =
			port->ipa_params.prod_clnt_hdl;
		connect_params.usb_ipa_pipe_hdl =
			port->ipa_params.cons_clnt_hdl;
		connect_params.tethering_mode =
			TETH_TETHERING_MODE_RMNET;
		connect_params.client_type =
			port->ipa_params.src_client;
		ret = teth_bridge_connect(&connect_params);
		if (ret) {
			pr_err("%s:teth_bridge_connect() failed\n", __func__);
			goto disconnect_usb_bam_ipa_out;
		}
	}

	pr_debug("ipa_producer_ep:%d ipa_consumer_ep:%d\n",
			gport->ipa_producer_ep,
			gport->ipa_consumer_ep);

	pr_debug("src_bam_idx:%d dst_bam_idx:%d\n",
			port->src_connection_idx, port->dst_connection_idx);

	/* Don't queue the transfers yet, only after network stack is up */
	if (port->func_type == USB_IPA_FUNC_RNDIS) {
		pr_debug("%s: Not starting now, waiting for network notify",
			__func__);
		return;
	}

	if (gport->out)
		ipa_data_start_endless_xfer(port, false);
	if (gport->in)
		ipa_data_start_endless_xfer(port, true);

	pr_debug("Connect workqueue done (port %pK)", port);
	return;

	/* Error unwind: reverse order of the setup phases above. */
disconnect_usb_bam_ipa_out:
	if (!is_ipa_disconnected) {
		usb_bam_disconnect_ipa(port->usb_bam_type, &port->ipa_params);
		is_ipa_disconnected = true;
	}
	if (port->func_type == USB_IPA_FUNC_RMNET)
		teth_bridge_disconnect(port->ipa_params.src_client);
unconfig_msm_ep_in:
	spin_lock_irqsave(&port->port_lock, flags);
	/* check if USB cable is disconnected or not */
	if (port->port_usb && gport->in)
		msm_ep_unconfig(port->port_usb->in);
	spin_unlock_irqrestore(&port->port_lock, flags);
unconfig_msm_ep_out:
	if (gport->in)
		usb_bam_free_fifos(port->usb_bam_type,
				port->dst_connection_idx);
	spin_lock_irqsave(&port->port_lock, flags);
	/* check if USB cable is disconnected or not */
	if (port->port_usb && gport->out)
		msm_ep_unconfig(port->port_usb->out);
	spin_unlock_irqrestore(&port->port_lock, flags);
out:
	if (gport->out)
		usb_bam_free_fifos(port->usb_bam_type,
				port->src_connection_idx);
	spin_lock_irqsave(&port->port_lock, flags);
	port->is_connected = false;
	spin_unlock_irqrestore(&port->port_lock, flags);
	/* release the pm reference taken in ipa_data_connect() */
	usb_gadget_autopm_put_async(port->gadget);
}
| 721 | |
| 722 | /** |
| 723 | * ipa_data_connect() - Prepare IPA params and enable USB endpoints |
| 724 | * @gp: USB IPA gadget port |
| 725 | * @port_num: port number used by accelerated function |
| 726 | * @src_connection_idx: USB BAM pipe index used as producer |
| 727 | * @dst_connection_idx: USB BAM pipe index used as consumer |
| 728 | * |
| 729 | * It is being called from accelerated function driver (from set_alt()) to |
| 730 | * initiate USB BAM IPA connection. This API is enabling accelerated endpoints |
| 731 | * and schedule connect_work() which establishes USB IPA BAM communication. |
| 732 | */ |
| 733 | int ipa_data_connect(struct gadget_ipa_port *gp, enum ipa_func_type func, |
| 734 | u8 src_connection_idx, u8 dst_connection_idx) |
| 735 | { |
| 736 | struct ipa_data_ch_info *port; |
| 737 | unsigned long flags; |
| 738 | int ret = 0; |
| 739 | |
| 740 | pr_debug("dev:%pK port#%d src_connection_idx:%d dst_connection_idx:%d\n", |
| 741 | gp, func, src_connection_idx, dst_connection_idx); |
| 742 | |
| 743 | if (func >= USB_IPA_NUM_FUNCS) { |
| 744 | pr_err("invalid portno#%d\n", func); |
| 745 | ret = -ENODEV; |
| 746 | goto err; |
| 747 | } |
| 748 | |
| 749 | if (!gp) { |
| 750 | pr_err("gadget port is null\n"); |
| 751 | ret = -ENODEV; |
| 752 | goto err; |
| 753 | } |
| 754 | |
| 755 | port = ipa_data_ports[func]; |
| 756 | |
| 757 | spin_lock_irqsave(&port->port_lock, flags); |
| 758 | port->port_usb = gp; |
| 759 | port->gadget = gp->cdev->gadget; |
| 760 | |
| 761 | if (gp->out) { |
| 762 | port->rx_req = usb_ep_alloc_request(gp->out, GFP_ATOMIC); |
| 763 | if (!port->rx_req) { |
| 764 | spin_unlock_irqrestore(&port->port_lock, flags); |
| 765 | pr_err("%s: failed to allocate rx_req\n", __func__); |
| 766 | goto err; |
| 767 | } |
| 768 | port->rx_req->context = port; |
| 769 | port->rx_req->complete = ipa_data_endless_complete; |
| 770 | port->rx_req->length = 0; |
| 771 | port->rx_req->no_interrupt = 1; |
| 772 | } |
| 773 | |
| 774 | if (gp->in) { |
| 775 | port->tx_req = usb_ep_alloc_request(gp->in, GFP_ATOMIC); |
| 776 | if (!port->tx_req) { |
| 777 | pr_err("%s: failed to allocate tx_req\n", __func__); |
| 778 | goto free_rx_req; |
| 779 | } |
| 780 | port->tx_req->context = port; |
| 781 | port->tx_req->complete = ipa_data_endless_complete; |
| 782 | port->tx_req->length = 0; |
| 783 | port->tx_req->no_interrupt = 1; |
| 784 | } |
| 785 | port->src_connection_idx = src_connection_idx; |
| 786 | port->dst_connection_idx = dst_connection_idx; |
| 787 | port->usb_bam_type = usb_bam_get_bam_type(gp->cdev->gadget->name); |
| 788 | |
| 789 | port->ipa_params.src_pipe = &(port->src_pipe_idx); |
| 790 | port->ipa_params.dst_pipe = &(port->dst_pipe_idx); |
| 791 | port->ipa_params.src_idx = src_connection_idx; |
| 792 | port->ipa_params.dst_idx = dst_connection_idx; |
| 793 | |
| 794 | /* |
| 795 | * Disable Xfer complete and Xfer not ready interrupts by |
| 796 | * marking endless flag which is used in UDC driver to enable |
| 797 | * these interrupts. with this set, these interrupts for selected |
| 798 | * endpoints won't be enabled. |
| 799 | */ |
| 800 | if (port->port_usb->in) { |
| 801 | port->port_usb->in->endless = true; |
| 802 | ret = usb_ep_enable(port->port_usb->in); |
| 803 | if (ret) { |
| 804 | pr_err("usb_ep_enable failed eptype:IN ep:%pK", |
| 805 | port->port_usb->in); |
| 806 | usb_ep_free_request(port->port_usb->in, port->tx_req); |
| 807 | port->tx_req = NULL; |
| 808 | port->port_usb->in->endless = false; |
| 809 | goto err_usb_in; |
| 810 | } |
| 811 | } |
| 812 | |
| 813 | if (port->port_usb->out) { |
| 814 | port->port_usb->out->endless = true; |
| 815 | ret = usb_ep_enable(port->port_usb->out); |
| 816 | if (ret) { |
| 817 | pr_err("usb_ep_enable failed eptype:OUT ep:%pK", |
| 818 | port->port_usb->out); |
| 819 | usb_ep_free_request(port->port_usb->out, port->rx_req); |
| 820 | port->rx_req = NULL; |
| 821 | port->port_usb->out->endless = false; |
| 822 | goto err_usb_out; |
| 823 | } |
| 824 | } |
| 825 | |
| 826 | /* Wait for host to enable flow_control */ |
| 827 | if (port->func_type == USB_IPA_FUNC_RNDIS) { |
| 828 | spin_unlock_irqrestore(&port->port_lock, flags); |
| 829 | ret = 0; |
| 830 | return ret; |
| 831 | } |
| 832 | |
| 833 | /* |
| 834 | * Increment usage count upon cable connect. Decrement after IPA |
| 835 | * handshake is done in disconnect work (due to cable disconnect) |
| 836 | * or in suspend work. |
| 837 | */ |
| 838 | usb_gadget_autopm_get_noresume(port->gadget); |
| 839 | |
| 840 | queue_work(ipa_data_wq, &port->connect_w); |
| 841 | spin_unlock_irqrestore(&port->port_lock, flags); |
| 842 | |
| 843 | return ret; |
| 844 | |
| 845 | err_usb_out: |
| 846 | if (port->port_usb->in) { |
| 847 | usb_ep_disable(port->port_usb->in); |
| 848 | port->port_usb->in->endless = false; |
| 849 | } |
| 850 | err_usb_in: |
| 851 | if (gp->in && port->tx_req) { |
| 852 | usb_ep_free_request(gp->in, port->tx_req); |
| 853 | port->tx_req = NULL; |
| 854 | } |
| 855 | free_rx_req: |
| 856 | if (gp->out && port->rx_req) { |
| 857 | usb_ep_free_request(gp->out, port->rx_req); |
| 858 | port->rx_req = NULL; |
| 859 | } |
| 860 | spin_unlock_irqrestore(&port->port_lock, flags); |
| 861 | err: |
| 862 | pr_debug("%s(): failed with error:%d\n", __func__, ret); |
| 863 | return ret; |
| 864 | } |
| 865 | |
| 866 | /** |
| 867 | * ipa_data_start() - Restart USB endless transfer |
| 868 | * @param: IPA data channel information |
| 869 | * @dir: USB BAM pipe direction |
| 870 | * |
| 871 | * It is being used to restart USB endless transfer for USB bus resume. |
| 872 | * For USB consumer case, it restarts USB endless RX transfer, whereas |
| 873 | * for USB producer case, it resets DBM endpoint and restart USB endless |
| 874 | * TX transfer. |
| 875 | */ |
| 876 | static void ipa_data_start(void *param, enum usb_bam_pipe_dir dir) |
| 877 | { |
| 878 | struct ipa_data_ch_info *port = param; |
| 879 | struct usb_gadget *gadget = NULL; |
| 880 | |
| 881 | if (!port || !port->port_usb || !port->port_usb->cdev->gadget) { |
| 882 | pr_err("%s:port,cdev or gadget is NULL\n", __func__); |
| 883 | return; |
| 884 | } |
| 885 | |
| 886 | gadget = port->port_usb->cdev->gadget; |
| 887 | if (dir == USB_TO_PEER_PERIPHERAL) { |
| 888 | pr_debug("%s(): start endless RX\n", __func__); |
| 889 | ipa_data_start_endless_xfer(port, false); |
| 890 | } else { |
| 891 | pr_debug("%s(): start endless TX\n", __func__); |
| 892 | if (msm_dwc3_reset_ep_after_lpm(gadget)) { |
| 893 | configure_fifo(port->usb_bam_type, |
| 894 | port->dst_connection_idx, port->port_usb->in); |
| 895 | } |
| 896 | ipa_data_start_endless_xfer(port, true); |
| 897 | } |
| 898 | } |
| 899 | |
| 900 | /** |
| 901 | * ipa_data_stop() - Stop endless Tx/Rx transfers |
| 902 | * @param: IPA data channel information |
| 903 | * @dir: USB BAM pipe direction |
| 904 | * |
| 905 | * It is being used to stop endless Tx/Rx transfers. It is being used |
| 906 | * for USB bus suspend functionality. |
| 907 | */ |
| 908 | static void ipa_data_stop(void *param, enum usb_bam_pipe_dir dir) |
| 909 | { |
| 910 | struct ipa_data_ch_info *port = param; |
| 911 | struct usb_gadget *gadget = NULL; |
| 912 | |
| 913 | if (!port || !port->port_usb || !port->port_usb->cdev->gadget) { |
| 914 | pr_err("%s:port,cdev or gadget is NULL\n", __func__); |
| 915 | return; |
| 916 | } |
| 917 | |
| 918 | gadget = port->port_usb->cdev->gadget; |
| 919 | if (dir == USB_TO_PEER_PERIPHERAL) { |
| 920 | pr_debug("%s(): stop endless RX transfer\n", __func__); |
| 921 | ipa_data_stop_endless_xfer(port, false); |
| 922 | } else { |
| 923 | pr_debug("%s(): stop endless TX transfer\n", __func__); |
| 924 | ipa_data_stop_endless_xfer(port, true); |
| 925 | } |
| 926 | } |
| 927 | |
/* Drain all pending IPA data work items (connect/disconnect/suspend/resume). */
void ipa_data_flush_workqueue(void)
{
	pr_debug("%s(): Flushing workqueue\n", __func__);
	flush_workqueue(ipa_data_wq);
}
| 933 | |
/**
 * ipa_data_suspend() - Initiate USB BAM IPA suspend functionality
 * @gp: Gadget IPA port
 * @func: USB IPA function type, used to index ipa_data_ports[]
 * @remote_wakeup_enabled: true if the host enabled remote wakeup
 *
 * It is being used to initiate USB BAM IPA suspend functionality
 * for USB bus suspend functionality. With remote wakeup disabled the
 * BAM is fully disconnected; otherwise the suspend handshake is
 * deferred to the suspend work item.
 */
void ipa_data_suspend(struct gadget_ipa_port *gp, enum ipa_func_type func,
	bool remote_wakeup_enabled)
{
	struct ipa_data_ch_info *port;
	unsigned long flags;

	if (func >= USB_IPA_NUM_FUNCS) {
		pr_err("invalid ipa portno#%d\n", func);
		return;
	}

	if (!gp) {
		pr_err("data port is null\n");
		return;
	}
	pr_debug("%s: suspended port %d\n", __func__, func);

	port = ipa_data_ports[func];
	if (!port) {
		pr_err("%s(): Port is NULL.\n", __func__);
		return;
	}

	/* suspend with remote wakeup disabled */
	if (!remote_wakeup_enabled) {
		/*
		 * When remote wakeup is disabled, IPA BAM is disconnected
		 * because it cannot send new data until the USB bus is resumed.
		 * Endpoint descriptors info is saved before it gets reset by
		 * the BAM disconnect API. This lets us restore this info when
		 * the USB bus is resumed.
		 */
		if (gp->in) {
			gp->in_ep_desc_backup = gp->in->desc;
			pr_debug("in_ep_desc_backup = %pK\n",
				gp->in_ep_desc_backup);
		}
		if (gp->out) {
			gp->out_ep_desc_backup = gp->out->desc;
			pr_debug("out_ep_desc_backup = %pK\n",
				gp->out_ep_desc_backup);
		}
		ipa_data_disconnect(gp, func);
		return;
	}

	/* Remote wakeup enabled: let the worker run the BAM suspend handshake */
	spin_lock_irqsave(&port->port_lock, flags);
	queue_work(ipa_data_wq, &port->suspend_w);
	spin_unlock_irqrestore(&port->port_lock, flags);
}
| 992 | static void bam2bam_data_suspend_work(struct work_struct *w) |
| 993 | { |
| 994 | struct ipa_data_ch_info *port = container_of(w, struct ipa_data_ch_info, |
| 995 | connect_w); |
| 996 | unsigned long flags; |
| 997 | int ret; |
| 998 | |
| 999 | pr_debug("%s: suspend started\n", __func__); |
| 1000 | spin_lock_irqsave(&port->port_lock, flags); |
| 1001 | |
| 1002 | /* In case of RNDIS, host enables flow_control invoking connect_w. If it |
| 1003 | * is delayed then we may end up having suspend_w run before connect_w. |
| 1004 | * In this scenario, connect_w may or may not at all start if cable gets |
| 1005 | * disconnected or if host changes configuration e.g. RNDIS --> MBIM |
| 1006 | * For these cases don't do runtime_put as there was no _get yet, and |
| 1007 | * detect this condition on disconnect to not do extra pm_runtme_get |
| 1008 | * for SUSPEND --> DISCONNECT scenario. |
| 1009 | */ |
| 1010 | if (!port->is_connected) { |
| 1011 | pr_err("%s: Not yet connected. SUSPEND pending.\n", __func__); |
| 1012 | spin_unlock_irqrestore(&port->port_lock, flags); |
| 1013 | return; |
| 1014 | } |
| 1015 | ret = usb_bam_register_wake_cb(port->usb_bam_type, |
| 1016 | port->dst_connection_idx, NULL, port); |
| 1017 | if (ret) { |
| 1018 | pr_err("%s(): Failed to register BAM wake callback.\n", |
| 1019 | __func__); |
| 1020 | return; |
| 1021 | } |
| 1022 | |
| 1023 | usb_bam_register_start_stop_cbs(port->usb_bam_type, |
| 1024 | port->dst_connection_idx, ipa_data_start, |
| 1025 | ipa_data_stop, port); |
| 1026 | /* |
| 1027 | * release lock here because bam_data_start() or |
| 1028 | * bam_data_stop() called from usb_bam_suspend() |
| 1029 | * re-acquires port lock. |
| 1030 | */ |
| 1031 | spin_unlock_irqrestore(&port->port_lock, flags); |
| 1032 | usb_bam_suspend(port->usb_bam_type, &port->ipa_params); |
| 1033 | spin_lock_irqsave(&port->port_lock, flags); |
| 1034 | |
| 1035 | /* |
| 1036 | * Decrement usage count after IPA handshake is done |
| 1037 | * to allow gadget parent to go to lpm. This counter was |
| 1038 | * incremented upon cable connect. |
| 1039 | */ |
| 1040 | usb_gadget_autopm_put_async(port->gadget); |
| 1041 | |
| 1042 | spin_unlock_irqrestore(&port->port_lock, flags); |
| 1043 | } |
| 1044 | |
/**
 * ipa_data_resume() - Initiate USB resume functionality
 * @gp: Gadget IPA port
 * @func: USB IPA function type, used to index ipa_data_ports[]
 * @remote_wakeup_enabled: true if the host enabled remote wakeup
 *
 * It is being used to initiate USB resume functionality
 * for USB bus resume case. With remote wakeup disabled the endpoint
 * descriptors saved at suspend are restored and a full reconnect is
 * performed; otherwise the resume handshake is deferred to the resume
 * work item.
 */
void ipa_data_resume(struct gadget_ipa_port *gp, enum ipa_func_type func,
		bool remote_wakeup_enabled)
{
	struct ipa_data_ch_info *port;
	unsigned long flags;
	struct usb_gadget *gadget = NULL;
	u8 src_connection_idx = 0;
	u8 dst_connection_idx = 0;
	enum usb_ctrl usb_bam_type;

	pr_debug("dev:%pK port number:%d\n", gp, func);

	if (func >= USB_IPA_NUM_FUNCS) {
		pr_err("invalid ipa portno#%d\n", func);
		return;
	}

	if (!gp) {
		pr_err("data port is null\n");
		return;
	}

	port = ipa_data_ports[func];
	if (!port) {
		pr_err("port %u is NULL", func);
		return;
	}

	gadget = gp->cdev->gadget;
	/* resume with remote wakeup disabled */
	if (!remote_wakeup_enabled) {
		/* NOTE(review): DPL appears to use BAM pipe 1, others pipe 0 */
		int bam_pipe_num = (func == USB_IPA_FUNC_DPL) ? 1 : 0;

		usb_bam_type = usb_bam_get_bam_type(gadget->name);
		/* Restore endpoint descriptors info saved by ipa_data_suspend() */
		if (gp->in) {
			gp->in->desc = gp->in_ep_desc_backup;
			pr_debug("in_ep_desc_backup = %pK\n",
				gp->in_ep_desc_backup);
			dst_connection_idx = usb_bam_get_connection_idx(
				usb_bam_type, IPA_P_BAM, PEER_PERIPHERAL_TO_USB,
				USB_BAM_DEVICE, bam_pipe_num);
		}
		if (gp->out) {
			gp->out->desc = gp->out_ep_desc_backup;
			pr_debug("out_ep_desc_backup = %pK\n",
				gp->out_ep_desc_backup);
			src_connection_idx = usb_bam_get_connection_idx(
				usb_bam_type, IPA_P_BAM, USB_TO_PEER_PERIPHERAL,
				USB_BAM_DEVICE, bam_pipe_num);
		}
		/* Full reconnect: suspend path tore the BAM connection down */
		ipa_data_connect(gp, func,
			src_connection_idx, dst_connection_idx);
		return;
	}

	spin_lock_irqsave(&port->port_lock, flags);

	/*
	 * Increment usage count here to disallow gadget
	 * parent suspend. This counter will decrement
	 * after IPA handshake is done in disconnect work
	 * (due to cable disconnect) or in bam_data_disconnect
	 * in suspended state.
	 */
	usb_gadget_autopm_get_noresume(port->gadget);
	queue_work(ipa_data_wq, &port->resume_w);
	spin_unlock_irqrestore(&port->port_lock, flags);
}
| 1122 | |
| 1123 | static void bam2bam_data_resume_work(struct work_struct *w) |
| 1124 | { |
| 1125 | struct ipa_data_ch_info *port = container_of(w, struct ipa_data_ch_info, |
| 1126 | connect_w); |
| 1127 | struct usb_gadget *gadget; |
| 1128 | unsigned long flags; |
| 1129 | int ret; |
| 1130 | |
| 1131 | spin_lock_irqsave(&port->port_lock, flags); |
| 1132 | if (!port->port_usb || !port->port_usb->cdev) { |
| 1133 | pr_err("port->port_usb or cdev is NULL"); |
| 1134 | goto exit; |
| 1135 | } |
| 1136 | |
| 1137 | if (!port->port_usb->cdev->gadget) { |
| 1138 | pr_err("port->port_usb->cdev->gadget is NULL"); |
| 1139 | goto exit; |
| 1140 | } |
| 1141 | |
| 1142 | pr_debug("%s: resume started\n", __func__); |
| 1143 | gadget = port->port_usb->cdev->gadget; |
| 1144 | if (!gadget) { |
| 1145 | spin_unlock_irqrestore(&port->port_lock, flags); |
| 1146 | pr_err("%s(): Gadget is NULL.\n", __func__); |
| 1147 | return; |
| 1148 | } |
| 1149 | |
| 1150 | ret = usb_bam_register_wake_cb(port->usb_bam_type, |
| 1151 | port->dst_connection_idx, NULL, NULL); |
| 1152 | if (ret) { |
| 1153 | spin_unlock_irqrestore(&port->port_lock, flags); |
| 1154 | pr_err("%s(): Failed to register BAM wake callback.\n", |
| 1155 | __func__); |
| 1156 | return; |
| 1157 | } |
| 1158 | |
| 1159 | if (msm_dwc3_reset_ep_after_lpm(gadget)) { |
| 1160 | configure_fifo(port->usb_bam_type, port->src_connection_idx, |
| 1161 | port->port_usb->out); |
| 1162 | configure_fifo(port->usb_bam_type, port->dst_connection_idx, |
| 1163 | port->port_usb->in); |
| 1164 | spin_unlock_irqrestore(&port->port_lock, flags); |
| 1165 | msm_dwc3_reset_dbm_ep(port->port_usb->in); |
| 1166 | spin_lock_irqsave(&port->port_lock, flags); |
| 1167 | usb_bam_resume(port->usb_bam_type, &port->ipa_params); |
| 1168 | } |
| 1169 | |
| 1170 | exit: |
| 1171 | spin_unlock_irqrestore(&port->port_lock, flags); |
| 1172 | } |
| 1173 | |
| 1174 | /** |
| 1175 | * ipa_data_port_alloc() - Allocate IPA USB Port structure |
| 1176 | * @portno: port number to be used by particular USB function |
| 1177 | * |
| 1178 | * It is being used by USB function driver to allocate IPA data port |
| 1179 | * for USB IPA data accelerated path. |
| 1180 | * |
| 1181 | * Retrun: 0 in case of success, otherwise errno. |
| 1182 | */ |
| 1183 | static int ipa_data_port_alloc(enum ipa_func_type func) |
| 1184 | { |
| 1185 | struct ipa_data_ch_info *port = NULL; |
| 1186 | |
| 1187 | if (ipa_data_ports[func] != NULL) { |
| 1188 | pr_debug("port %d already allocated.\n", func); |
| 1189 | return 0; |
| 1190 | } |
| 1191 | |
| 1192 | port = kzalloc(sizeof(struct ipa_data_ch_info), GFP_KERNEL); |
| 1193 | if (!port) |
| 1194 | return -ENOMEM; |
| 1195 | |
| 1196 | ipa_data_ports[func] = port; |
| 1197 | |
| 1198 | pr_debug("port:%pK with portno:%d allocated\n", port, func); |
| 1199 | return 0; |
| 1200 | } |
| 1201 | |
| 1202 | /** |
| 1203 | * ipa_data_port_select() - Select particular port for BAM2BAM IPA mode |
| 1204 | * @portno: port number to be used by particular USB function |
| 1205 | * @func_type: USB gadget function type |
| 1206 | * |
| 1207 | * It is being used by USB function driver to select which BAM2BAM IPA |
| 1208 | * port particular USB function wants to use. |
| 1209 | * |
| 1210 | */ |
| 1211 | void ipa_data_port_select(enum ipa_func_type func) |
| 1212 | { |
| 1213 | struct ipa_data_ch_info *port = NULL; |
| 1214 | |
| 1215 | pr_debug("portno:%d\n", func); |
| 1216 | |
| 1217 | port = ipa_data_ports[func]; |
| 1218 | port->port_num = func; |
| 1219 | port->is_connected = false; |
| 1220 | |
| 1221 | spin_lock_init(&port->port_lock); |
| 1222 | |
| 1223 | if (!work_pending(&port->connect_w)) |
| 1224 | INIT_WORK(&port->connect_w, ipa_data_connect_work); |
| 1225 | |
| 1226 | if (!work_pending(&port->disconnect_w)) |
| 1227 | INIT_WORK(&port->disconnect_w, ipa_data_disconnect_work); |
| 1228 | |
| 1229 | INIT_WORK(&port->suspend_w, bam2bam_data_suspend_work); |
| 1230 | INIT_WORK(&port->resume_w, bam2bam_data_resume_work); |
| 1231 | |
| 1232 | port->ipa_params.src_client = IPA_CLIENT_USB_PROD; |
| 1233 | port->ipa_params.dst_client = IPA_CLIENT_USB_CONS; |
| 1234 | port->func_type = func; |
| 1235 | }; |
| 1236 | |
| 1237 | void ipa_data_free(enum ipa_func_type func) |
| 1238 | { |
| 1239 | pr_debug("freeing %d IPA BAM port", func); |
| 1240 | |
| 1241 | kfree(ipa_data_ports[func]); |
| 1242 | ipa_data_ports[func] = NULL; |
| 1243 | if (func == USB_IPA_FUNC_RNDIS) |
| 1244 | kfree(rndis_data); |
| 1245 | if (ipa_data_wq) { |
| 1246 | destroy_workqueue(ipa_data_wq); |
| 1247 | ipa_data_wq = NULL; |
| 1248 | } |
| 1249 | } |
| 1250 | |
| 1251 | /** |
| 1252 | * ipa_data_setup() - setup BAM2BAM IPA port |
| 1253 | * |
| 1254 | * Each USB function who wants to use BAM2BAM IPA port would |
| 1255 | * be counting number of IPA port to use and initialize those |
| 1256 | * ports at time of bind_config() in android gadget driver. |
| 1257 | * |
| 1258 | * Retrun: 0 in case of success, otherwise errno. |
| 1259 | */ |
| 1260 | int ipa_data_setup(enum ipa_func_type func) |
| 1261 | { |
| 1262 | int ret; |
| 1263 | |
| 1264 | pr_debug("requested %d IPA BAM port", func); |
| 1265 | |
| 1266 | if (func >= USB_IPA_NUM_FUNCS) { |
| 1267 | pr_err("Invalid num of ports count:%d\n", func); |
| 1268 | return -EINVAL; |
| 1269 | } |
| 1270 | |
| 1271 | ret = ipa_data_port_alloc(func); |
| 1272 | if (ret) { |
| 1273 | pr_err("Failed to alloc port:%d\n", func); |
| 1274 | return ret; |
| 1275 | } |
| 1276 | |
| 1277 | if (func == USB_IPA_FUNC_RNDIS) { |
| 1278 | rndis_data = kzalloc(sizeof(*rndis_data), GFP_KERNEL); |
| 1279 | if (!rndis_data) { |
| 1280 | pr_err("%s: fail allocate and initialize new instance\n", |
| 1281 | __func__); |
| 1282 | goto free_ipa_ports; |
| 1283 | } |
| 1284 | } |
| 1285 | if (ipa_data_wq) { |
| 1286 | pr_debug("ipa_data_wq is already setup."); |
| 1287 | return 0; |
| 1288 | } |
| 1289 | |
| 1290 | ipa_data_wq = alloc_workqueue("k_usb_ipa_data", |
| 1291 | WQ_UNBOUND | WQ_MEM_RECLAIM, 1); |
| 1292 | if (!ipa_data_wq) { |
| 1293 | pr_err("Failed to create workqueue\n"); |
| 1294 | ret = -ENOMEM; |
| 1295 | goto free_rndis_data; |
| 1296 | } |
| 1297 | |
| 1298 | return 0; |
| 1299 | |
| 1300 | free_rndis_data: |
| 1301 | if (func == USB_IPA_FUNC_RNDIS) |
| 1302 | kfree(rndis_data); |
| 1303 | free_ipa_ports: |
| 1304 | kfree(ipa_data_ports[func]); |
| 1305 | ipa_data_ports[func] = NULL; |
| 1306 | |
| 1307 | return ret; |
| 1308 | } |
| 1309 | |
| 1310 | void ipa_data_set_ul_max_xfer_size(u32 max_transfer_size) |
| 1311 | { |
| 1312 | if (!max_transfer_size) { |
| 1313 | pr_err("%s: invalid parameters\n", __func__); |
| 1314 | return; |
| 1315 | } |
| 1316 | rndis_data->ul_max_transfer_size = max_transfer_size; |
| 1317 | pr_debug("%s(): ul_max_xfer_size:%d\n", __func__, max_transfer_size); |
| 1318 | } |
| 1319 | |
| 1320 | void ipa_data_set_dl_max_xfer_size(u32 max_transfer_size) |
| 1321 | { |
| 1322 | |
| 1323 | if (!max_transfer_size) { |
| 1324 | pr_err("%s: invalid parameters\n", __func__); |
| 1325 | return; |
| 1326 | } |
| 1327 | rndis_data->dl_max_transfer_size = max_transfer_size; |
| 1328 | pr_debug("%s(): dl_max_xfer_size:%d\n", __func__, max_transfer_size); |
| 1329 | } |
| 1330 | |
| 1331 | void ipa_data_set_ul_max_pkt_num(u8 max_packets_number) |
| 1332 | { |
| 1333 | if (!max_packets_number) { |
| 1334 | pr_err("%s: invalid parameters\n", __func__); |
| 1335 | return; |
| 1336 | } |
| 1337 | |
| 1338 | rndis_data->ul_max_packets_number = max_packets_number; |
| 1339 | |
| 1340 | if (max_packets_number > 1) |
| 1341 | rndis_data->ul_aggregation_enable = true; |
| 1342 | else |
| 1343 | rndis_data->ul_aggregation_enable = false; |
| 1344 | |
| 1345 | pr_debug("%s(): ul_aggregation enable:%d ul_max_packets_number:%d\n", |
| 1346 | __func__, rndis_data->ul_aggregation_enable, |
| 1347 | max_packets_number); |
| 1348 | } |
| 1349 | |
| 1350 | void ipa_data_start_rndis_ipa(enum ipa_func_type func) |
| 1351 | { |
| 1352 | struct ipa_data_ch_info *port; |
| 1353 | |
| 1354 | pr_debug("%s\n", __func__); |
| 1355 | |
| 1356 | port = ipa_data_ports[func]; |
| 1357 | if (!port) { |
| 1358 | pr_err("%s: port is NULL", __func__); |
| 1359 | return; |
| 1360 | } |
| 1361 | |
| 1362 | if (atomic_read(&port->pipe_connect_notified)) { |
| 1363 | pr_debug("%s: Transfers already started?\n", __func__); |
| 1364 | return; |
| 1365 | } |
| 1366 | /* |
| 1367 | * Increment usage count upon cable connect. Decrement after IPA |
| 1368 | * handshake is done in disconnect work due to cable disconnect |
| 1369 | * or in suspend work. |
| 1370 | */ |
| 1371 | usb_gadget_autopm_get_noresume(port->gadget); |
| 1372 | queue_work(ipa_data_wq, &port->connect_w); |
| 1373 | } |
| 1374 | |
/*
 * ipa_data_stop_rndis_ipa() - stop RNDIS IPA transfers and queue the
 * disconnect work. No-op unless a pipe connect was previously notified.
 */
void ipa_data_stop_rndis_ipa(enum ipa_func_type func)
{
	struct ipa_data_ch_info *port;
	unsigned long flags;

	pr_debug("%s\n", __func__);

	port = ipa_data_ports[func];
	if (!port) {
		pr_err("%s: port is NULL", __func__);
		return;
	}

	/* nothing to stop if the IPA pipe connect was never notified */
	if (!atomic_read(&port->pipe_connect_notified))
		return;

	/* trigger IPA-side reset before halting the endless transfers */
	rndis_ipa_reset_trigger();
	ipa_data_stop_endless_xfer(port, true);
	ipa_data_stop_endless_xfer(port, false);
	spin_lock_irqsave(&port->port_lock, flags);
	/* check if USB cable is disconnected or not */
	if (port->port_usb) {
		msm_ep_unconfig(port->port_usb->in);
		msm_ep_unconfig(port->port_usb->out);
	}
	spin_unlock_irqrestore(&port->port_lock, flags);
	queue_work(ipa_data_wq, &port->disconnect_w);
}