blob: 693dc6159746afc2475a551b8d562d3890958960 [file] [log] [blame]
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08001/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Mayank Rana511f3b22016-08-02 12:00:11 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/slab.h>
17#include <linux/cpu.h>
18#include <linux/platform_device.h>
19#include <linux/dma-mapping.h>
20#include <linux/dmapool.h>
21#include <linux/pm_runtime.h>
22#include <linux/ratelimit.h>
23#include <linux/interrupt.h>
24#include <linux/ioport.h>
25#include <linux/clk.h>
26#include <linux/io.h>
27#include <linux/module.h>
28#include <linux/types.h>
29#include <linux/delay.h>
30#include <linux/of.h>
31#include <linux/of_platform.h>
32#include <linux/of_gpio.h>
33#include <linux/list.h>
34#include <linux/uaccess.h>
35#include <linux/usb/ch9.h>
36#include <linux/usb/gadget.h>
37#include <linux/usb/of.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070038#include <linux/regulator/consumer.h>
39#include <linux/pm_wakeup.h>
40#include <linux/power_supply.h>
41#include <linux/cdev.h>
42#include <linux/completion.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070043#include <linux/msm-bus.h>
44#include <linux/irq.h>
45#include <linux/extcon.h>
Amit Nischal4d278212016-06-06 17:54:34 +053046#include <linux/reset.h>
Hemant Kumar633dc332016-08-10 13:41:05 -070047#include <linux/clk/qcom.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070048
49#include "power.h"
50#include "core.h"
51#include "gadget.h"
52#include "dbm.h"
53#include "debug.h"
54#include "xhci.h"
55
56/* time out to wait for USB cable status notification (in ms)*/
57#define SM_INIT_TIMEOUT 30000
58
59/* AHB2PHY register offsets */
60#define PERIPH_SS_AHB2PHY_TOP_CFG 0x10
61
/* AHB2PHY read/write wait value */
63#define ONE_READ_WRITE_WAIT 0x11
64
65/* cpu to fix usb interrupt */
66static int cpu_to_affin;
67module_param(cpu_to_affin, int, S_IRUGO|S_IWUSR);
68MODULE_PARM_DESC(cpu_to_affin, "affin usb irq to this cpu");
69
70/* XHCI registers */
71#define USB3_HCSPARAMS1 (0x4)
72#define USB3_PORTSC (0x420)
73
74/**
75 * USB QSCRATCH Hardware registers
76 *
77 */
78#define QSCRATCH_REG_OFFSET (0x000F8800)
79#define QSCRATCH_GENERAL_CFG (QSCRATCH_REG_OFFSET + 0x08)
80#define CGCTL_REG (QSCRATCH_REG_OFFSET + 0x28)
81#define PWR_EVNT_IRQ_STAT_REG (QSCRATCH_REG_OFFSET + 0x58)
82#define PWR_EVNT_IRQ_MASK_REG (QSCRATCH_REG_OFFSET + 0x5C)
83
84#define PWR_EVNT_POWERDOWN_IN_P3_MASK BIT(2)
85#define PWR_EVNT_POWERDOWN_OUT_P3_MASK BIT(3)
86#define PWR_EVNT_LPM_IN_L2_MASK BIT(4)
87#define PWR_EVNT_LPM_OUT_L2_MASK BIT(5)
88#define PWR_EVNT_LPM_OUT_L1_MASK BIT(13)
89
90/* QSCRATCH_GENERAL_CFG register bit offset */
91#define PIPE_UTMI_CLK_SEL BIT(0)
92#define PIPE3_PHYSTATUS_SW BIT(3)
93#define PIPE_UTMI_CLK_DIS BIT(8)
94
95#define HS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x10)
96#define UTMI_OTG_VBUS_VALID BIT(20)
97#define SW_SESSVLD_SEL BIT(28)
98
99#define SS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x30)
100#define LANE0_PWR_PRESENT BIT(24)
101
102/* GSI related registers */
103#define GSI_TRB_ADDR_BIT_53_MASK (1 << 21)
104#define GSI_TRB_ADDR_BIT_55_MASK (1 << 23)
105
106#define GSI_GENERAL_CFG_REG (QSCRATCH_REG_OFFSET + 0xFC)
107#define GSI_RESTART_DBL_PNTR_MASK BIT(20)
108#define GSI_CLK_EN_MASK BIT(12)
109#define BLOCK_GSI_WR_GO_MASK BIT(1)
110#define GSI_EN_MASK BIT(0)
111
112#define GSI_DBL_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x110) + (n*4))
113#define GSI_DBL_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x120) + (n*4))
114#define GSI_RING_BASE_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x130) + (n*4))
115#define GSI_RING_BASE_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x140) + (n*4))
116
117#define GSI_IF_STS (QSCRATCH_REG_OFFSET + 0x1A4)
118#define GSI_WR_CTRL_STATE_MASK BIT(15)
119
Mayank Ranaf4918d32016-12-15 13:35:55 -0800120#define DWC3_GEVNTCOUNT_EVNTINTRPTMASK (1 << 31)
121#define DWC3_GEVNTADRHI_EVNTADRHI_GSI_EN(n) (n << 22)
122#define DWC3_GEVNTADRHI_EVNTADRHI_GSI_IDX(n) (n << 16)
123#define DWC3_GEVENT_TYPE_GSI 0x3
124
/*
 * Bookkeeping node that remembers a request's original ->complete callback
 * while it is overridden for DBM-mode handling; nodes are linked on
 * dwc3_msm->req_complete_list and matched back by the ->req pointer.
 */
struct dwc3_msm_req_complete {
	struct list_head list_item;
	struct usb_request *req;
	void (*orig_complete)(struct usb_ep *ep,
			      struct usb_request *req);
};
131
/* State of the USB ID pin: grounded requests host role, floating device. */
enum dwc3_id_state {
	DWC3_ID_GROUND = 0,
	DWC3_ID_FLOAT,
};
136
/* Type-C cable plug orientation (which CC line is active). */
enum plug_orientation {
	ORIENTATION_NONE,
	ORIENTATION_CC1,
	ORIENTATION_CC2,
};
143
144/* Input bits to state machine (mdwc->inputs) */
145
146#define ID 0
147#define B_SESS_VLD 1
148#define B_SUSPEND 2
149
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +0530150#define PM_QOS_SAMPLE_SEC 2
151#define PM_QOS_THRESHOLD 400
152
/*
 * Per-instance state of the MSM glue layer around the Synopsys DWC3 core:
 * MMIO bases, clock/reset/regulator resources, PHY handles, extcon wiring,
 * the dual-role state-machine inputs, and DBM/GSI acceleration bookkeeping.
 */
struct dwc3_msm {
	struct device *dev;
	void __iomem *base;			/* core (QSCRATCH/xHCI) MMIO */
	void __iomem *ahb2phy_base;
	struct platform_device	*dwc3;		/* child dwc3 core device */
	/* saved ep ops so DBM-mode queue() can fall back to the originals */
	const struct usb_ep_ops *original_ep_ops[DWC3_ENDPOINTS_NUM];
	struct list_head req_complete_list;

	/* clocks and resets */
	struct clk		*xo_clk;
	struct clk		*core_clk;
	long			core_clk_rate;
	long			core_clk_rate_hs;	/* reduced rate for HS-only */
	struct clk		*iface_clk;
	struct clk		*sleep_clk;
	struct clk		*utmi_clk;
	unsigned int		utmi_clk_rate;
	struct clk		*utmi_clk_src;
	struct clk		*bus_aggr_clk;
	struct clk		*noc_aggr_clk;
	struct clk		*cfg_ahb_clk;
	struct reset_control	*core_reset;
	struct regulator	*dwc3_gdsc;

	struct usb_phy		*hs_phy, *ss_phy;

	struct dbm		*dbm;

	/* VBUS regulator for host mode */
	struct regulator	*vbus_reg;
	int			vbus_retry_count;
	bool			resume_pending;
	atomic_t                pm_suspended;
	int			hs_phy_irq;
	int			ss_phy_irq;
	struct work_struct	resume_work;
	struct work_struct	restart_usb_work;
	bool			in_restart;
	struct workqueue_struct *dwc3_wq;
	struct delayed_work	sm_work;		/* dual-role state machine */
	unsigned long		inputs;			/* ID/B_SESS_VLD/B_SUSPEND bits */
	unsigned int		max_power;
	bool			charging_disabled;
	enum usb_otg_state	otg_state;
	struct work_struct	bus_vote_w;
	unsigned int		bus_vote;
	u32			bus_perf_client;
	struct msm_bus_scale_pdata	*bus_scale_table;
	struct power_supply	*usb_psy;
	struct work_struct	vbus_draw_work;
	bool			in_host_mode;
	enum usb_device_speed	max_rh_port_speed;
	unsigned int		tx_fifo_size;
	bool			vbus_active;
	bool			suspend;
	bool			disable_host_mode_pm;
	enum dwc3_id_state	id_state;
	unsigned long		lpm_flags;		/* which LPM steps were taken */
#define MDWC3_SS_PHY_SUSPEND		BIT(0)
#define MDWC3_ASYNC_IRQ_WAKE_CAPABILITY	BIT(1)
#define MDWC3_POWER_COLLAPSE		BIT(2)

	unsigned int		irq_to_affin;
	struct notifier_block	dwc3_cpu_notifier;

	/* cable detection via extcon */
	struct extcon_dev	*extcon_vbus;
	struct extcon_dev	*extcon_id;
	struct notifier_block	vbus_nb;
	struct notifier_block	id_nb;

	struct notifier_block	host_nb;

	int			pwr_event_irq;
	atomic_t                in_p3;			/* SS PHY is in P3 power state */
	unsigned int		lpm_to_suspend_delay;
	bool			init;
	enum plug_orientation	typec_orientation;
	/* GSI (IPA hardware accelerator) event buffer bookkeeping */
	u32			num_gsi_event_buffers;
	struct dwc3_event_buffer **gsi_ev_buff;
	/* PM QoS vote management driven by periodic IRQ-rate sampling */
	int pm_qos_latency;
	struct pm_qos_request pm_qos_req_dma;
	struct delayed_work perf_vote_work;
};
234
235#define USB_HSPHY_3P3_VOL_MIN 3050000 /* uV */
236#define USB_HSPHY_3P3_VOL_MAX 3300000 /* uV */
237#define USB_HSPHY_3P3_HPM_LOAD 16000 /* uA */
238
239#define USB_HSPHY_1P8_VOL_MIN 1800000 /* uV */
240#define USB_HSPHY_1P8_VOL_MAX 1800000 /* uV */
241#define USB_HSPHY_1P8_HPM_LOAD 19000 /* uA */
242
243#define USB_SSPHY_1P8_VOL_MIN 1800000 /* uV */
244#define USB_SSPHY_1P8_VOL_MAX 1800000 /* uV */
245#define USB_SSPHY_1P8_HPM_LOAD 23000 /* uA */
246
247#define DSTS_CONNECTSPD_SS 0x4
248
249
250static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc);
251static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA);
Mayank Ranaf4918d32016-12-15 13:35:55 -0800252static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event);
Mayank Rana511f3b22016-08-02 12:00:11 -0700253/**
254 *
255 * Read register with debug info.
256 *
257 * @base - DWC3 base virtual address.
258 * @offset - register offset.
259 *
260 * @return u32
261 */
262static inline u32 dwc3_msm_read_reg(void *base, u32 offset)
263{
264 u32 val = ioread32(base + offset);
265 return val;
266}
267
268/**
269 * Read register masked field with debug info.
270 *
271 * @base - DWC3 base virtual address.
272 * @offset - register offset.
273 * @mask - register bitmask.
274 *
275 * @return u32
276 */
277static inline u32 dwc3_msm_read_reg_field(void *base,
278 u32 offset,
279 const u32 mask)
280{
281 u32 shift = find_first_bit((void *)&mask, 32);
282 u32 val = ioread32(base + offset);
283
284 val &= mask; /* clear other bits */
285 val >>= shift;
286 return val;
287}
288
/**
 * dwc3_msm_write_reg - write a 32-bit controller register.
 *
 * @base - DWC3 base virtual address.
 * @offset - register offset.
 * @val - value to write.
 */
static inline void dwc3_msm_write_reg(void *base, u32 offset, u32 val)
{
	iowrite32(val, base + offset);
}
302
303/**
304 * Write register masked field with debug info.
305 *
306 * @base - DWC3 base virtual address.
307 * @offset - register offset.
308 * @mask - register bitmask.
309 * @val - value to write.
310 *
311 */
312static inline void dwc3_msm_write_reg_field(void *base, u32 offset,
313 const u32 mask, u32 val)
314{
315 u32 shift = find_first_bit((void *)&mask, 32);
316 u32 tmp = ioread32(base + offset);
317
318 tmp &= ~mask; /* clear written bits */
319 val = tmp | (val << shift);
320 iowrite32(val, base + offset);
321}
322
323/**
324 * Write register and read back masked value to confirm it is written
325 *
326 * @base - DWC3 base virtual address.
327 * @offset - register offset.
328 * @mask - register bitmask specifying what should be updated
329 * @val - value to write.
330 *
331 */
332static inline void dwc3_msm_write_readback(void *base, u32 offset,
333 const u32 mask, u32 val)
334{
335 u32 write_val, tmp = ioread32(base + offset);
336
337 tmp &= ~mask; /* retain other bits */
338 write_val = tmp | val;
339
340 iowrite32(write_val, base + offset);
341
342 /* Read back to see if val was written */
343 tmp = ioread32(base + offset);
344 tmp &= mask; /* clear other bits */
345
346 if (tmp != val)
347 pr_err("%s: write: %x to QSCRATCH: %x FAILED\n",
348 __func__, val, offset);
349}
350
Hemant Kumar8e4c2f22017-01-24 18:13:07 -0800351static bool dwc3_msm_is_ss_rhport_connected(struct dwc3_msm *mdwc)
352{
353 int i, num_ports;
354 u32 reg;
355
356 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
357 num_ports = HCS_MAX_PORTS(reg);
358
359 for (i = 0; i < num_ports; i++) {
360 reg = dwc3_msm_read_reg(mdwc->base, USB3_PORTSC + i*0x10);
361 if ((reg & PORT_CONNECT) && DEV_SUPERSPEED(reg))
362 return true;
363 }
364
365 return false;
366}
367
Mayank Rana511f3b22016-08-02 12:00:11 -0700368static bool dwc3_msm_is_host_superspeed(struct dwc3_msm *mdwc)
369{
370 int i, num_ports;
371 u32 reg;
372
373 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
374 num_ports = HCS_MAX_PORTS(reg);
375
376 for (i = 0; i < num_ports; i++) {
377 reg = dwc3_msm_read_reg(mdwc->base, USB3_PORTSC + i*0x10);
378 if ((reg & PORT_PE) && DEV_SUPERSPEED(reg))
379 return true;
380 }
381
382 return false;
383}
384
385static inline bool dwc3_msm_is_dev_superspeed(struct dwc3_msm *mdwc)
386{
387 u8 speed;
388
389 speed = dwc3_msm_read_reg(mdwc->base, DWC3_DSTS) & DWC3_DSTS_CONNECTSPD;
390 return !!(speed & DSTS_CONNECTSPD_SS);
391}
392
393static inline bool dwc3_msm_is_superspeed(struct dwc3_msm *mdwc)
394{
395 if (mdwc->in_host_mode)
396 return dwc3_msm_is_host_superspeed(mdwc);
397
398 return dwc3_msm_is_dev_superspeed(mdwc);
399}
400
401#if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
402/**
403 * Configure the DBM with the BAM's data fifo.
404 * This function is called by the USB BAM Driver
405 * upon initialization.
406 *
407 * @ep - pointer to usb endpoint.
408 * @addr - address of data fifo.
409 * @size - size of data fifo.
410 *
411 */
412int msm_data_fifo_config(struct usb_ep *ep, phys_addr_t addr,
413 u32 size, u8 dst_pipe_idx)
414{
415 struct dwc3_ep *dep = to_dwc3_ep(ep);
416 struct dwc3 *dwc = dep->dwc;
417 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
418
419 dev_dbg(mdwc->dev, "%s\n", __func__);
420
421 return dbm_data_fifo_config(mdwc->dbm, dep->number, addr, size,
422 dst_pipe_idx);
423}
424
425
/**
 * Completion handler interposed on DBM-mode requests.
 *
 * Looks up the dwc3_msm_req_complete node registered for @request,
 * unhooks it, accounts for the extra (link) TRB the DBM queue consumed,
 * unconfigures the DBM endpoint, and finally invokes the request's
 * original completion callback.
 *
 * Called with dwc->lock held (from dwc3_gadget_giveback()).
 *
 * @ep - pointer to usb_ep instance.
 * @request - the completing usb_request.
 */
static void dwc3_msm_req_complete_func(struct usb_ep *ep,
				       struct usb_request *request)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
	struct dwc3_msm_req_complete *req_complete = NULL;

	/* Find original request complete function and remove it from list */
	list_for_each_entry(req_complete, &mdwc->req_complete_list, list_item) {
		if (req_complete->req == request)
			break;
	}
	/*
	 * NOTE(review): after list_for_each_entry() the cursor is never NULL,
	 * so the first clause is dead; the ->req comparison is what actually
	 * detects "not found" (on an exhausted list the cursor aliases the
	 * list head) — confirm before restructuring.
	 */
	if (!req_complete || req_complete->req != request) {
		dev_err(dep->dwc->dev, "%s: could not find the request\n",
			__func__);
		return;
	}
	list_del(&req_complete->list_item);

	/*
	 * Release another one TRB to the pool since DBM queue took 2 TRBs
	 * (normal and link), and the dwc3/gadget.c :: dwc3_gadget_giveback
	 * released only one.
	 */
	dep->trb_dequeue++;

	/* Unconfigure dbm ep */
	dbm_ep_unconfig(mdwc->dbm, dep->number);

	/*
	 * If this is the last endpoint we unconfigured, than reset also
	 * the event buffers; unless unconfiguring the ep due to lpm,
	 * in which case the event buffer only gets reset during the
	 * block reset.
	 */
	if (dbm_get_num_of_eps_configured(mdwc->dbm) == 0 &&
		!dbm_reset_ep_after_lpm(mdwc->dbm))
		dbm_event_buffer_config(mdwc->dbm, 0, 0, 0);

	/*
	 * Call original complete function, notice that dwc->lock is already
	 * taken by the caller of this function (dwc3_gadget_giveback()).
	 */
	request->complete = req_complete->orig_complete;
	if (request->complete)
		request->complete(ep, request);

	kfree(req_complete);
}
486
487
488/**
489* Helper function
490*
491* Reset DBM endpoint.
492*
493* @mdwc - pointer to dwc3_msm instance.
494* @dep - pointer to dwc3_ep instance.
495*
496* @return int - 0 on success, negative on error.
497*/
498static int __dwc3_msm_dbm_ep_reset(struct dwc3_msm *mdwc, struct dwc3_ep *dep)
499{
500 int ret;
501
502 dev_dbg(mdwc->dev, "Resetting dbm endpoint %d\n", dep->number);
503
504 /* Reset the dbm endpoint */
505 ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, true);
506 if (ret) {
507 dev_err(mdwc->dev, "%s: failed to assert dbm ep reset\n",
508 __func__);
509 return ret;
510 }
511
512 /*
513 * The necessary delay between asserting and deasserting the dbm ep
514 * reset is based on the number of active endpoints. If there is more
515 * than one endpoint, a 1 msec delay is required. Otherwise, a shorter
516 * delay will suffice.
517 */
518 if (dbm_get_num_of_eps_configured(mdwc->dbm) > 1)
519 usleep_range(1000, 1200);
520 else
521 udelay(10);
522 ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, false);
523 if (ret) {
524 dev_err(mdwc->dev, "%s: failed to deassert dbm ep reset\n",
525 __func__);
526 return ret;
527 }
528
529 return 0;
530}
531
532/**
533* Reset the DBM endpoint which is linked to the given USB endpoint.
534*
535* @usb_ep - pointer to usb_ep instance.
536*
537* @return int - 0 on success, negative on error.
538*/
539
540int msm_dwc3_reset_dbm_ep(struct usb_ep *ep)
541{
542 struct dwc3_ep *dep = to_dwc3_ep(ep);
543 struct dwc3 *dwc = dep->dwc;
544 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
545
546 return __dwc3_msm_dbm_ep_reset(mdwc, dep);
547}
548EXPORT_SYMBOL(msm_dwc3_reset_dbm_ep);
549
550
551/**
552* Helper function.
553* See the header of the dwc3_msm_ep_queue function.
554*
555* @dwc3_ep - pointer to dwc3_ep instance.
556* @req - pointer to dwc3_request instance.
557*
558* @return int - 0 on success, negative on error.
559*/
560static int __dwc3_msm_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
561{
562 struct dwc3_trb *trb;
563 struct dwc3_trb *trb_link;
564 struct dwc3_gadget_ep_cmd_params params;
565 u32 cmd;
566 int ret = 0;
567
Mayank Rana83ad5822016-08-09 14:17:22 -0700568 /* We push the request to the dep->started_list list to indicate that
Mayank Rana511f3b22016-08-02 12:00:11 -0700569 * this request is issued with start transfer. The request will be out
570 * from this list in 2 cases. The first is that the transfer will be
571 * completed (not if the transfer is endless using a circular TRBs with
572 * with link TRB). The second case is an option to do stop stransfer,
573 * this can be initiated by the function driver when calling dequeue.
574 */
Mayank Rana83ad5822016-08-09 14:17:22 -0700575 req->started = true;
576 list_add_tail(&req->list, &dep->started_list);
Mayank Rana511f3b22016-08-02 12:00:11 -0700577
578 /* First, prepare a normal TRB, point to the fake buffer */
Mayank Rana83ad5822016-08-09 14:17:22 -0700579 trb = &dep->trb_pool[dep->trb_enqueue & DWC3_TRB_NUM];
580 dep->trb_enqueue++;
Mayank Rana511f3b22016-08-02 12:00:11 -0700581 memset(trb, 0, sizeof(*trb));
582
583 req->trb = trb;
584 trb->bph = DBM_TRB_BIT | DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
585 trb->size = DWC3_TRB_SIZE_LENGTH(req->request.length);
586 trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_HWO |
587 DWC3_TRB_CTRL_CHN | (req->direction ? 0 : DWC3_TRB_CTRL_CSP);
588 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
589
590 /* Second, prepare a Link TRB that points to the first TRB*/
Mayank Rana83ad5822016-08-09 14:17:22 -0700591 trb_link = &dep->trb_pool[dep->trb_enqueue & DWC3_TRB_NUM];
592 dep->trb_enqueue++;
Mayank Rana511f3b22016-08-02 12:00:11 -0700593 memset(trb_link, 0, sizeof(*trb_link));
594
595 trb_link->bpl = lower_32_bits(req->trb_dma);
596 trb_link->bph = DBM_TRB_BIT |
597 DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
598 trb_link->size = 0;
599 trb_link->ctrl = DWC3_TRBCTL_LINK_TRB | DWC3_TRB_CTRL_HWO;
600
601 /*
602 * Now start the transfer
603 */
604 memset(&params, 0, sizeof(params));
605 params.param0 = 0; /* TDAddr High */
606 params.param1 = lower_32_bits(req->trb_dma); /* DAddr Low */
607
608 /* DBM requires IOC to be set */
609 cmd = DWC3_DEPCMD_STARTTRANSFER | DWC3_DEPCMD_CMDIOC;
Mayank Rana83ad5822016-08-09 14:17:22 -0700610 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -0700611 if (ret < 0) {
612 dev_dbg(dep->dwc->dev,
613 "%s: failed to send STARTTRANSFER command\n",
614 __func__);
615
616 list_del(&req->list);
617 return ret;
618 }
619 dep->flags |= DWC3_EP_BUSY;
Mayank Rana83ad5822016-08-09 14:17:22 -0700620 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700621
622 return ret;
623}
624
625/**
626* Queue a usb request to the DBM endpoint.
627* This function should be called after the endpoint
628* was enabled by the ep_enable.
629*
630* This function prepares special structure of TRBs which
631* is familiar with the DBM HW, so it will possible to use
632* this endpoint in DBM mode.
633*
634* The TRBs prepared by this function, is one normal TRB
635* which point to a fake buffer, followed by a link TRB
636* that points to the first TRB.
637*
638* The API of this function follow the regular API of
639* usb_ep_queue (see usb_ep_ops in include/linuk/usb/gadget.h).
640*
641* @usb_ep - pointer to usb_ep instance.
642* @request - pointer to usb_request instance.
643* @gfp_flags - possible flags.
644*
645* @return int - 0 on success, negative on error.
646*/
647static int dwc3_msm_ep_queue(struct usb_ep *ep,
648 struct usb_request *request, gfp_t gfp_flags)
649{
650 struct dwc3_request *req = to_dwc3_request(request);
651 struct dwc3_ep *dep = to_dwc3_ep(ep);
652 struct dwc3 *dwc = dep->dwc;
653 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
654 struct dwc3_msm_req_complete *req_complete;
655 unsigned long flags;
656 int ret = 0, size;
657 u8 bam_pipe;
658 bool producer;
659 bool disable_wb;
660 bool internal_mem;
661 bool ioc;
662 bool superspeed;
663
664 if (!(request->udc_priv & MSM_SPS_MODE)) {
665 /* Not SPS mode, call original queue */
666 dev_vdbg(mdwc->dev, "%s: not sps mode, use regular queue\n",
667 __func__);
668
669 return (mdwc->original_ep_ops[dep->number])->queue(ep,
670 request,
671 gfp_flags);
672 }
673
674 /* HW restriction regarding TRB size (8KB) */
675 if (req->request.length < 0x2000) {
676 dev_err(mdwc->dev, "%s: Min TRB size is 8KB\n", __func__);
677 return -EINVAL;
678 }
679
680 /*
681 * Override req->complete function, but before doing that,
682 * store it's original pointer in the req_complete_list.
683 */
684 req_complete = kzalloc(sizeof(*req_complete), gfp_flags);
685 if (!req_complete)
686 return -ENOMEM;
687
688 req_complete->req = request;
689 req_complete->orig_complete = request->complete;
690 list_add_tail(&req_complete->list_item, &mdwc->req_complete_list);
691 request->complete = dwc3_msm_req_complete_func;
692
693 /*
694 * Configure the DBM endpoint
695 */
696 bam_pipe = request->udc_priv & MSM_PIPE_ID_MASK;
697 producer = ((request->udc_priv & MSM_PRODUCER) ? true : false);
698 disable_wb = ((request->udc_priv & MSM_DISABLE_WB) ? true : false);
699 internal_mem = ((request->udc_priv & MSM_INTERNAL_MEM) ? true : false);
700 ioc = ((request->udc_priv & MSM_ETD_IOC) ? true : false);
701
702 ret = dbm_ep_config(mdwc->dbm, dep->number, bam_pipe, producer,
703 disable_wb, internal_mem, ioc);
704 if (ret < 0) {
705 dev_err(mdwc->dev,
706 "error %d after calling dbm_ep_config\n", ret);
707 return ret;
708 }
709
710 dev_vdbg(dwc->dev, "%s: queing request %p to ep %s length %d\n",
711 __func__, request, ep->name, request->length);
712 size = dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTSIZ(0));
713 dbm_event_buffer_config(mdwc->dbm,
714 dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRLO(0)),
715 dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRHI(0)),
716 DWC3_GEVNTSIZ_SIZE(size));
717
718 /*
719 * We must obtain the lock of the dwc3 core driver,
720 * including disabling interrupts, so we will be sure
721 * that we are the only ones that configure the HW device
722 * core and ensure that we queuing the request will finish
723 * as soon as possible so we will release back the lock.
724 */
725 spin_lock_irqsave(&dwc->lock, flags);
726 if (!dep->endpoint.desc) {
727 dev_err(mdwc->dev,
728 "%s: trying to queue request %p to disabled ep %s\n",
729 __func__, request, ep->name);
730 ret = -EPERM;
731 goto err;
732 }
733
734 if (dep->number == 0 || dep->number == 1) {
735 dev_err(mdwc->dev,
736 "%s: trying to queue dbm request %p to control ep %s\n",
737 __func__, request, ep->name);
738 ret = -EPERM;
739 goto err;
740 }
741
742
Mayank Rana83ad5822016-08-09 14:17:22 -0700743 if (dep->trb_dequeue != dep->trb_enqueue ||
744 !list_empty(&dep->pending_list)
745 || !list_empty(&dep->started_list)) {
Mayank Rana511f3b22016-08-02 12:00:11 -0700746 dev_err(mdwc->dev,
747 "%s: trying to queue dbm request %p tp ep %s\n",
748 __func__, request, ep->name);
749 ret = -EPERM;
750 goto err;
751 } else {
Mayank Rana83ad5822016-08-09 14:17:22 -0700752 dep->trb_dequeue = 0;
753 dep->trb_enqueue = 0;
Mayank Rana511f3b22016-08-02 12:00:11 -0700754 }
755
756 ret = __dwc3_msm_ep_queue(dep, req);
757 if (ret < 0) {
758 dev_err(mdwc->dev,
759 "error %d after calling __dwc3_msm_ep_queue\n", ret);
760 goto err;
761 }
762
763 spin_unlock_irqrestore(&dwc->lock, flags);
764 superspeed = dwc3_msm_is_dev_superspeed(mdwc);
765 dbm_set_speed(mdwc->dbm, (u8)superspeed);
766
767 return 0;
768
769err:
770 spin_unlock_irqrestore(&dwc->lock, flags);
771 kfree(req_complete);
772 return ret;
773}
774
775/*
776* Returns XferRscIndex for the EP. This is stored at StartXfer GSI EP OP
777*
778* @usb_ep - pointer to usb_ep instance.
779*
780* @return int - XferRscIndex
781*/
782static inline int gsi_get_xfer_index(struct usb_ep *ep)
783{
784 struct dwc3_ep *dep = to_dwc3_ep(ep);
785
786 return dep->resource_index;
787}
788
789/*
790* Fills up the GSI channel information needed in call to IPA driver
791* for GSI channel creation.
792*
793* @usb_ep - pointer to usb_ep instance.
794* @ch_info - output parameter with requested channel info
795*/
796static void gsi_get_channel_info(struct usb_ep *ep,
797 struct gsi_channel_info *ch_info)
798{
799 struct dwc3_ep *dep = to_dwc3_ep(ep);
800 int last_trb_index = 0;
801 struct dwc3 *dwc = dep->dwc;
802 struct usb_gsi_request *request = ch_info->ch_req;
803
804 /* Provide physical USB addresses for DEPCMD and GEVENTCNT registers */
805 ch_info->depcmd_low_addr = (u32)(dwc->reg_phys +
Mayank Rana83ad5822016-08-09 14:17:22 -0700806 DWC3_DEPCMD);
Mayank Rana511f3b22016-08-02 12:00:11 -0700807 ch_info->depcmd_hi_addr = 0;
808
809 ch_info->xfer_ring_base_addr = dwc3_trb_dma_offset(dep,
810 &dep->trb_pool[0]);
811 /* Convert to multipled of 1KB */
812 ch_info->const_buffer_size = request->buf_len/1024;
813
814 /* IN direction */
815 if (dep->direction) {
816 /*
817 * Multiply by size of each TRB for xfer_ring_len in bytes.
818 * 2n + 2 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
819 * extra Xfer TRB followed by n ZLP TRBs + 1 LINK TRB.
820 */
821 ch_info->xfer_ring_len = (2 * request->num_bufs + 2) * 0x10;
822 last_trb_index = 2 * request->num_bufs + 2;
823 } else { /* OUT direction */
824 /*
825 * Multiply by size of each TRB for xfer_ring_len in bytes.
826 * n + 1 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
827 * LINK TRB.
828 */
829 ch_info->xfer_ring_len = (request->num_bufs + 1) * 0x10;
830 last_trb_index = request->num_bufs + 1;
831 }
832
833 /* Store last 16 bits of LINK TRB address as per GSI hw requirement */
834 ch_info->last_trb_addr = (dwc3_trb_dma_offset(dep,
835 &dep->trb_pool[last_trb_index - 1]) & 0x0000FFFF);
836 ch_info->gevntcount_low_addr = (u32)(dwc->reg_phys +
837 DWC3_GEVNTCOUNT(ep->ep_intr_num));
838 ch_info->gevntcount_hi_addr = 0;
839
840 dev_dbg(dwc->dev,
841 "depcmd_laddr=%x last_trb_addr=%x gevtcnt_laddr=%x gevtcnt_haddr=%x",
842 ch_info->depcmd_low_addr, ch_info->last_trb_addr,
843 ch_info->gevntcount_low_addr, ch_info->gevntcount_hi_addr);
844}
845
846/*
847* Perform StartXfer on GSI EP. Stores XferRscIndex.
848*
849* @usb_ep - pointer to usb_ep instance.
850*
851* @return int - 0 on success
852*/
853static int gsi_startxfer_for_ep(struct usb_ep *ep)
854{
855 int ret;
856 struct dwc3_gadget_ep_cmd_params params;
857 u32 cmd;
858 struct dwc3_ep *dep = to_dwc3_ep(ep);
859 struct dwc3 *dwc = dep->dwc;
860
861 memset(&params, 0, sizeof(params));
862 params.param0 = GSI_TRB_ADDR_BIT_53_MASK | GSI_TRB_ADDR_BIT_55_MASK;
863 params.param0 |= (ep->ep_intr_num << 16);
864 params.param1 = lower_32_bits(dwc3_trb_dma_offset(dep,
865 &dep->trb_pool[0]));
866 cmd = DWC3_DEPCMD_STARTTRANSFER;
867 cmd |= DWC3_DEPCMD_PARAM(0);
Mayank Rana83ad5822016-08-09 14:17:22 -0700868 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -0700869
870 if (ret < 0)
871 dev_dbg(dwc->dev, "Fail StrtXfr on GSI EP#%d\n", dep->number);
Mayank Rana83ad5822016-08-09 14:17:22 -0700872 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700873 dev_dbg(dwc->dev, "XferRsc = %x", dep->resource_index);
874 return ret;
875}
876
877/*
878* Store Ring Base and Doorbell Address for GSI EP
879* for GSI channel creation.
880*
881* @usb_ep - pointer to usb_ep instance.
882* @dbl_addr - Doorbell address obtained from IPA driver
883*/
884static void gsi_store_ringbase_dbl_info(struct usb_ep *ep, u32 dbl_addr)
885{
886 struct dwc3_ep *dep = to_dwc3_ep(ep);
887 struct dwc3 *dwc = dep->dwc;
888 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
889 int n = ep->ep_intr_num - 1;
890
891 dwc3_msm_write_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n),
892 dwc3_trb_dma_offset(dep, &dep->trb_pool[0]));
893 dwc3_msm_write_reg(mdwc->base, GSI_DBL_ADDR_L(n), dbl_addr);
894
895 dev_dbg(mdwc->dev, "Ring Base Addr %d = %x", n,
896 dwc3_msm_read_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n)));
897 dev_dbg(mdwc->dev, "GSI DB Addr %d = %x", n,
898 dwc3_msm_read_reg(mdwc->base, GSI_DBL_ADDR_L(n)));
899}
900
901/*
902* Rings Doorbell for IN GSI Channel
903*
904* @usb_ep - pointer to usb_ep instance.
905* @request - pointer to GSI request. This is used to pass in the
906* address of the GSI doorbell obtained from IPA driver
907*/
908static void gsi_ring_in_db(struct usb_ep *ep, struct usb_gsi_request *request)
909{
910 void __iomem *gsi_dbl_address_lsb;
911 void __iomem *gsi_dbl_address_msb;
912 dma_addr_t offset;
913 u64 dbl_addr = *((u64 *)request->buf_base_addr);
914 u32 dbl_lo_addr = (dbl_addr & 0xFFFFFFFF);
915 u32 dbl_hi_addr = (dbl_addr >> 32);
916 u32 num_trbs = (request->num_bufs * 2 + 2);
917 struct dwc3_ep *dep = to_dwc3_ep(ep);
918 struct dwc3 *dwc = dep->dwc;
919 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
920
921 gsi_dbl_address_lsb = devm_ioremap_nocache(mdwc->dev,
922 dbl_lo_addr, sizeof(u32));
923 if (!gsi_dbl_address_lsb)
924 dev_dbg(mdwc->dev, "Failed to get GSI DBL address LSB\n");
925
926 gsi_dbl_address_msb = devm_ioremap_nocache(mdwc->dev,
927 dbl_hi_addr, sizeof(u32));
928 if (!gsi_dbl_address_msb)
929 dev_dbg(mdwc->dev, "Failed to get GSI DBL address MSB\n");
930
931 offset = dwc3_trb_dma_offset(dep, &dep->trb_pool[num_trbs-1]);
932 dev_dbg(mdwc->dev, "Writing link TRB addr: %pa to %p (%x)\n",
933 &offset, gsi_dbl_address_lsb, dbl_lo_addr);
934
935 writel_relaxed(offset, gsi_dbl_address_lsb);
936 writel_relaxed(0, gsi_dbl_address_msb);
937}
938
939/*
940* Sets HWO bit for TRBs and performs UpdateXfer for OUT EP.
941*
942* @usb_ep - pointer to usb_ep instance.
943* @request - pointer to GSI request. Used to determine num of TRBs for OUT EP.
944*
945* @return int - 0 on success
946*/
static int gsi_updatexfer_for_ep(struct usb_ep *ep,
					struct usb_gsi_request *request)
{
	int i;
	int ret;
	u32 cmd;
	/* num_bufs data TRBs plus the trailing link TRB (OUT EP layout) */
	int num_trbs = request->num_bufs + 1;
	struct dwc3_trb *trb;
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	/* Hand the data TRBs back to the controller by setting HWO */
	for (i = 0; i < num_trbs - 1; i++) {
		trb = &dep->trb_pool[i];
		trb->ctrl |= DWC3_TRB_CTRL_HWO;
	}

	/* UpdateTransfer on the transfer resource obtained at StartXfer */
	memset(&params, 0, sizeof(params));
	cmd = DWC3_DEPCMD_UPDATETRANSFER;
	cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
	/*
	 * NOTE(review): DWC3_EP_BUSY is set even when the command fails;
	 * presumably the transfer was already started so the EP is busy
	 * regardless — confirm this ordering is intentional.
	 */
	dep->flags |= DWC3_EP_BUSY;
	if (ret < 0)
		dev_dbg(dwc->dev, "UpdateXfr fail on GSI EP#%d\n", dep->number);
	return ret;
}
973
974/*
975* Perform EndXfer on particular GSI EP.
976*
977* @usb_ep - pointer to usb_ep instance.
978*/
979static void gsi_endxfer_for_ep(struct usb_ep *ep)
980{
981 struct dwc3_ep *dep = to_dwc3_ep(ep);
982 struct dwc3 *dwc = dep->dwc;
983
984 dwc3_stop_active_transfer(dwc, dep->number, true);
985}
986
987/*
988* Allocates and configures TRBs for GSI EPs.
989*
990* @usb_ep - pointer to usb_ep instance.
991* @request - pointer to GSI request.
992*
993* @return int - 0 on success
994*/
995static int gsi_prepare_trbs(struct usb_ep *ep, struct usb_gsi_request *req)
996{
997 int i = 0;
998 dma_addr_t buffer_addr = req->dma;
999 struct dwc3_ep *dep = to_dwc3_ep(ep);
1000 struct dwc3 *dwc = dep->dwc;
1001 struct dwc3_trb *trb;
1002 int num_trbs = (dep->direction) ? (2 * (req->num_bufs) + 2)
1003 : (req->num_bufs + 1);
1004
1005 dep->trb_dma_pool = dma_pool_create(ep->name, dwc->dev,
1006 num_trbs * sizeof(struct dwc3_trb),
1007 num_trbs * sizeof(struct dwc3_trb), 0);
1008 if (!dep->trb_dma_pool) {
1009 dev_err(dep->dwc->dev, "failed to alloc trb dma pool for %s\n",
1010 dep->name);
1011 return -ENOMEM;
1012 }
1013
1014 dep->num_trbs = num_trbs;
1015
1016 dep->trb_pool = dma_pool_alloc(dep->trb_dma_pool,
1017 GFP_KERNEL, &dep->trb_pool_dma);
1018 if (!dep->trb_pool) {
1019 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
1020 dep->name);
1021 return -ENOMEM;
1022 }
1023
1024 /* IN direction */
1025 if (dep->direction) {
1026 for (i = 0; i < num_trbs ; i++) {
1027 trb = &dep->trb_pool[i];
1028 memset(trb, 0, sizeof(*trb));
1029 /* Set up first n+1 TRBs for ZLPs */
1030 if (i < (req->num_bufs + 1)) {
1031 trb->bpl = 0;
1032 trb->bph = 0;
1033 trb->size = 0;
1034 trb->ctrl = DWC3_TRBCTL_NORMAL
1035 | DWC3_TRB_CTRL_IOC;
1036 continue;
1037 }
1038
1039 /* Setup n TRBs pointing to valid buffers */
1040 trb->bpl = lower_32_bits(buffer_addr);
1041 trb->bph = 0;
1042 trb->size = 0;
1043 trb->ctrl = DWC3_TRBCTL_NORMAL
1044 | DWC3_TRB_CTRL_IOC;
1045 buffer_addr += req->buf_len;
1046
1047 /* Set up the Link TRB at the end */
1048 if (i == (num_trbs - 1)) {
1049 trb->bpl = dwc3_trb_dma_offset(dep,
1050 &dep->trb_pool[0]);
1051 trb->bph = (1 << 23) | (1 << 21)
1052 | (ep->ep_intr_num << 16);
1053 trb->size = 0;
1054 trb->ctrl = DWC3_TRBCTL_LINK_TRB
1055 | DWC3_TRB_CTRL_HWO;
1056 }
1057 }
1058 } else { /* OUT direction */
1059
1060 for (i = 0; i < num_trbs ; i++) {
1061
1062 trb = &dep->trb_pool[i];
1063 memset(trb, 0, sizeof(*trb));
1064 trb->bpl = lower_32_bits(buffer_addr);
1065 trb->bph = 0;
1066 trb->size = req->buf_len;
1067 trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_IOC
1068 | DWC3_TRB_CTRL_CSP
1069 | DWC3_TRB_CTRL_ISP_IMI;
1070 buffer_addr += req->buf_len;
1071
1072 /* Set up the Link TRB at the end */
1073 if (i == (num_trbs - 1)) {
1074 trb->bpl = dwc3_trb_dma_offset(dep,
1075 &dep->trb_pool[0]);
1076 trb->bph = (1 << 23) | (1 << 21)
1077 | (ep->ep_intr_num << 16);
1078 trb->size = 0;
1079 trb->ctrl = DWC3_TRBCTL_LINK_TRB
1080 | DWC3_TRB_CTRL_HWO;
1081 }
1082 }
1083 }
1084 return 0;
1085}
1086
1087/*
1088* Frees TRBs for GSI EPs.
1089*
1090* @usb_ep - pointer to usb_ep instance.
1091*
1092*/
1093static void gsi_free_trbs(struct usb_ep *ep)
1094{
1095 struct dwc3_ep *dep = to_dwc3_ep(ep);
1096
1097 if (dep->endpoint.ep_type == EP_TYPE_NORMAL)
1098 return;
1099
1100 /* Free TRBs and TRB pool for EP */
1101 if (dep->trb_dma_pool) {
1102 dma_pool_free(dep->trb_dma_pool, dep->trb_pool,
1103 dep->trb_pool_dma);
1104 dma_pool_destroy(dep->trb_dma_pool);
1105 dep->trb_pool = NULL;
1106 dep->trb_pool_dma = 0;
1107 dep->trb_dma_pool = NULL;
1108 }
1109}
1110/*
1111* Configures GSI EPs. For GSI EPs we need to set interrupter numbers.
1112*
1113* @usb_ep - pointer to usb_ep instance.
1114* @request - pointer to GSI request.
1115*/
static void gsi_configure_ep(struct usb_ep *ep, struct usb_gsi_request *request)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
	struct dwc3_gadget_ep_cmd_params params;
	const struct usb_endpoint_descriptor *desc = ep->desc;
	const struct usb_ss_ep_comp_descriptor *comp_desc = ep->comp_desc;
	u32 reg;

	memset(&params, 0x00, sizeof(params));

	/* Configure GSI EP: transfer type and max packet from descriptor */
	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));

	/* Burst size is only needed in SuperSpeed mode */
	if (dwc->gadget.speed == USB_SPEED_SUPER) {
		u32 burst = dep->endpoint.maxburst - 1;

		params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
	}

	/* Streams only for bulk EPs that advertise them in the companion */
	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
					| DWC3_DEPCFG_STREAM_EVENT_EN;
		dep->stream_capable = true;
	}

	/* Set EP number */
	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);

	/* Set interrupter number for GSI endpoints */
	params.param1 |= DWC3_DEPCFG_INT_NUM(ep->ep_intr_num);

	/* Enable XferInProgress and XferComplete Interrupts */
	params.param1 |= DWC3_DEPCFG_XFER_COMPLETE_EN;
	params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
	params.param1 |= DWC3_DEPCFG_FIFO_ERROR_EN;
	/*
	 * We must use the lower 16 TX FIFOs even though
	 * HW might have more
	 */
	/* Remove FIFO Number for GSI EP*/
	if (dep->direction)
		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);

	params.param0 |= DWC3_DEPCFG_ACTION_INIT;

	dev_dbg(mdwc->dev, "Set EP config to params = %x %x %x, for %s\n",
	params.param0, params.param1, params.param2, dep->name);

	dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);

	/* Set XferRsc Index for GSI EP; only on the first enable */
	if (!(dep->flags & DWC3_EP_ENABLED)) {
		memset(&params, 0x00, sizeof(params));
		params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
		dwc3_send_gadget_ep_cmd(dep,
			DWC3_DEPCMD_SETTRANSFRESOURCE, &params);

		/* Mirror what dwc3 core does on ep enable, then ungate the EP */
		dep->endpoint.desc = desc;
		dep->comp_desc = comp_desc;
		dep->type = usb_endpoint_type(desc);
		dep->flags |= DWC3_EP_ENABLED;
		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
		reg |= DWC3_DALEPENA_EP(dep->number);
		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
	}

}
1187
1188/*
1189* Enables USB wrapper for GSI
1190*
1191* @usb_ep - pointer to usb_ep instance.
1192*/
static void gsi_enable(struct usb_ep *ep)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);

	/* Turn on the GSI wrapper clock first */
	dwc3_msm_write_reg_field(mdwc->base,
		GSI_GENERAL_CFG_REG, GSI_CLK_EN_MASK, 1);
	/* Pulse the doorbell-pointer restart bit (set then clear) */
	dwc3_msm_write_reg_field(mdwc->base,
		GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 1);
	dwc3_msm_write_reg_field(mdwc->base,
		GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 0);
	dev_dbg(mdwc->dev, "%s: Enable GSI\n", __func__);
	/* Finally enable the GSI wrapper itself */
	dwc3_msm_write_reg_field(mdwc->base,
		GSI_GENERAL_CFG_REG, GSI_EN_MASK, 1);
}
1209
1210/*
1211* Block or allow doorbell towards GSI
1212*
1213* @usb_ep - pointer to usb_ep instance.
1214* @request - pointer to GSI request. In this case num_bufs is used as a bool
1215* to set or clear the doorbell bit
1216*/
1217static void gsi_set_clear_dbell(struct usb_ep *ep,
1218 bool block_db)
1219{
1220
1221 struct dwc3_ep *dep = to_dwc3_ep(ep);
1222 struct dwc3 *dwc = dep->dwc;
1223 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1224
1225 dwc3_msm_write_reg_field(mdwc->base,
1226 GSI_GENERAL_CFG_REG, BLOCK_GSI_WR_GO_MASK, block_db);
1227}
1228
1229/*
1230* Performs necessary checks before stopping GSI channels
1231*
1232* @usb_ep - pointer to usb_ep instance to access DWC3 regs
1233*/
static bool gsi_check_ready_to_suspend(struct usb_ep *ep, bool f_suspend)
{
	u32 timeout = 1500;
	u32 reg = 0;
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);

	/*
	 * Wait for the GSI write-control state machine to go idle.
	 * NOTE(review): this is a pure busy-wait (no cpu_relax/udelay),
	 * so "1500" is iterations, not a wall-clock bound — confirm the
	 * intended timeout behavior.
	 */
	while (dwc3_msm_read_reg_field(mdwc->base,
		GSI_IF_STS, GSI_WR_CTRL_STATE_MASK)) {
		if (!timeout--) {
			dev_err(mdwc->dev,
			"Unable to suspend GSI ch. WR_CTRL_STATE != 0\n");
			return false;
		}
	}
	/* Check for U3 only if we are not handling Function Suspend */
	if (!f_suspend) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
		if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U3) {
			dev_err(mdwc->dev, "Unable to suspend GSI ch\n");
			return false;
		}
	}

	return true;
}
1261
1262
1263/**
1264* Performs GSI operations or GSI EP related operations.
1265*
1266* @usb_ep - pointer to usb_ep instance.
1267* @op_data - pointer to opcode related data.
1268* @op - GSI related or GSI EP related op code.
1269*
1270* @return int - 0 on success, negative on error.
1271* Also returns XferRscIdx for GSI_EP_OP_GET_XFER_IDX.
1272*/
1273static int dwc3_msm_gsi_ep_op(struct usb_ep *ep,
1274 void *op_data, enum gsi_ep_op op)
1275{
1276 u32 ret = 0;
1277 struct dwc3_ep *dep = to_dwc3_ep(ep);
1278 struct dwc3 *dwc = dep->dwc;
1279 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1280 struct usb_gsi_request *request;
1281 struct gsi_channel_info *ch_info;
1282 bool block_db, f_suspend;
Mayank Rana8432c362016-09-30 18:41:17 -07001283 unsigned long flags;
Mayank Rana511f3b22016-08-02 12:00:11 -07001284
1285 switch (op) {
1286 case GSI_EP_OP_PREPARE_TRBS:
1287 request = (struct usb_gsi_request *)op_data;
1288 dev_dbg(mdwc->dev, "EP_OP_PREPARE_TRBS for %s\n", ep->name);
1289 ret = gsi_prepare_trbs(ep, request);
1290 break;
1291 case GSI_EP_OP_FREE_TRBS:
1292 dev_dbg(mdwc->dev, "EP_OP_FREE_TRBS for %s\n", ep->name);
1293 gsi_free_trbs(ep);
1294 break;
1295 case GSI_EP_OP_CONFIG:
1296 request = (struct usb_gsi_request *)op_data;
1297 dev_dbg(mdwc->dev, "EP_OP_CONFIG for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001298 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001299 gsi_configure_ep(ep, request);
Mayank Rana8432c362016-09-30 18:41:17 -07001300 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001301 break;
1302 case GSI_EP_OP_STARTXFER:
1303 dev_dbg(mdwc->dev, "EP_OP_STARTXFER for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001304 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001305 ret = gsi_startxfer_for_ep(ep);
Mayank Rana8432c362016-09-30 18:41:17 -07001306 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001307 break;
1308 case GSI_EP_OP_GET_XFER_IDX:
1309 dev_dbg(mdwc->dev, "EP_OP_GET_XFER_IDX for %s\n", ep->name);
1310 ret = gsi_get_xfer_index(ep);
1311 break;
1312 case GSI_EP_OP_STORE_DBL_INFO:
1313 dev_dbg(mdwc->dev, "EP_OP_STORE_DBL_INFO\n");
1314 gsi_store_ringbase_dbl_info(ep, *((u32 *)op_data));
1315 break;
1316 case GSI_EP_OP_ENABLE_GSI:
1317 dev_dbg(mdwc->dev, "EP_OP_ENABLE_GSI\n");
1318 gsi_enable(ep);
1319 break;
1320 case GSI_EP_OP_GET_CH_INFO:
1321 ch_info = (struct gsi_channel_info *)op_data;
1322 gsi_get_channel_info(ep, ch_info);
1323 break;
1324 case GSI_EP_OP_RING_IN_DB:
1325 request = (struct usb_gsi_request *)op_data;
1326 dev_dbg(mdwc->dev, "RING IN EP DB\n");
1327 gsi_ring_in_db(ep, request);
1328 break;
1329 case GSI_EP_OP_UPDATEXFER:
1330 request = (struct usb_gsi_request *)op_data;
1331 dev_dbg(mdwc->dev, "EP_OP_UPDATEXFER\n");
Mayank Rana8432c362016-09-30 18:41:17 -07001332 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001333 ret = gsi_updatexfer_for_ep(ep, request);
Mayank Rana8432c362016-09-30 18:41:17 -07001334 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001335 break;
1336 case GSI_EP_OP_ENDXFER:
1337 request = (struct usb_gsi_request *)op_data;
1338 dev_dbg(mdwc->dev, "EP_OP_ENDXFER for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001339 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001340 gsi_endxfer_for_ep(ep);
Mayank Rana8432c362016-09-30 18:41:17 -07001341 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001342 break;
1343 case GSI_EP_OP_SET_CLR_BLOCK_DBL:
1344 block_db = *((bool *)op_data);
1345 dev_dbg(mdwc->dev, "EP_OP_SET_CLR_BLOCK_DBL %d\n",
1346 block_db);
1347 gsi_set_clear_dbell(ep, block_db);
1348 break;
1349 case GSI_EP_OP_CHECK_FOR_SUSPEND:
1350 dev_dbg(mdwc->dev, "EP_OP_CHECK_FOR_SUSPEND\n");
1351 f_suspend = *((bool *)op_data);
1352 ret = gsi_check_ready_to_suspend(ep, f_suspend);
1353 break;
1354 case GSI_EP_OP_DISABLE:
1355 dev_dbg(mdwc->dev, "EP_OP_DISABLE\n");
1356 ret = ep->ops->disable(ep);
1357 break;
1358 default:
1359 dev_err(mdwc->dev, "%s: Invalid opcode GSI EP\n", __func__);
1360 }
1361
1362 return ret;
1363}
1364
1365/**
1366 * Configure MSM endpoint.
1367 * This function do specific configurations
1368 * to an endpoint which need specific implementaion
1369 * in the MSM architecture.
1370 *
1371 * This function should be called by usb function/class
1372 * layer which need a support from the specific MSM HW
1373 * which wrap the USB3 core. (like GSI or DBM specific endpoints)
1374 *
1375 * @ep - a pointer to some usb_ep instance
1376 *
1377 * @return int - 0 on success, negetive on error.
1378 */
1379int msm_ep_config(struct usb_ep *ep)
1380{
1381 struct dwc3_ep *dep = to_dwc3_ep(ep);
1382 struct dwc3 *dwc = dep->dwc;
1383 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1384 struct usb_ep_ops *new_ep_ops;
1385
1386
1387 /* Save original ep ops for future restore*/
1388 if (mdwc->original_ep_ops[dep->number]) {
1389 dev_err(mdwc->dev,
1390 "ep [%s,%d] already configured as msm endpoint\n",
1391 ep->name, dep->number);
1392 return -EPERM;
1393 }
1394 mdwc->original_ep_ops[dep->number] = ep->ops;
1395
1396 /* Set new usb ops as we like */
1397 new_ep_ops = kzalloc(sizeof(struct usb_ep_ops), GFP_ATOMIC);
1398 if (!new_ep_ops)
1399 return -ENOMEM;
1400
1401 (*new_ep_ops) = (*ep->ops);
1402 new_ep_ops->queue = dwc3_msm_ep_queue;
1403 new_ep_ops->gsi_ep_op = dwc3_msm_gsi_ep_op;
1404 ep->ops = new_ep_ops;
1405
1406 /*
1407 * Do HERE more usb endpoint configurations
1408 * which are specific to MSM.
1409 */
1410
1411 return 0;
1412}
1413EXPORT_SYMBOL(msm_ep_config);
1414
1415/**
1416 * Un-configure MSM endpoint.
1417 * Tear down configurations done in the
1418 * dwc3_msm_ep_config function.
1419 *
1420 * @ep - a pointer to some usb_ep instance
1421 *
1422 * @return int - 0 on success, negative on error.
1423 */
1424int msm_ep_unconfig(struct usb_ep *ep)
1425{
1426 struct dwc3_ep *dep = to_dwc3_ep(ep);
1427 struct dwc3 *dwc = dep->dwc;
1428 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1429 struct usb_ep_ops *old_ep_ops;
1430
1431 /* Restore original ep ops */
1432 if (!mdwc->original_ep_ops[dep->number]) {
1433 dev_err(mdwc->dev,
1434 "ep [%s,%d] was not configured as msm endpoint\n",
1435 ep->name, dep->number);
1436 return -EINVAL;
1437 }
1438 old_ep_ops = (struct usb_ep_ops *)ep->ops;
1439 ep->ops = mdwc->original_ep_ops[dep->number];
1440 mdwc->original_ep_ops[dep->number] = NULL;
1441 kfree(old_ep_ops);
1442
1443 /*
1444 * Do HERE more usb endpoint un-configurations
1445 * which are specific to MSM.
1446 */
1447
1448 return 0;
1449}
1450EXPORT_SYMBOL(msm_ep_unconfig);
1451#endif /* (CONFIG_USB_DWC3_GADGET) || (CONFIG_USB_DWC3_DUAL_ROLE) */
1452
1453static void dwc3_resume_work(struct work_struct *w);
1454
static void dwc3_restart_usb_work(struct work_struct *w)
{
	struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
						restart_usb_work);
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	/* up to 50 * 20ms = ~1s waiting for runtime suspend */
	unsigned int timeout = 50;

	dev_dbg(mdwc->dev, "%s\n", __func__);

	/* Restart only makes sense when active and in dual-role mode */
	if (atomic_read(&dwc->in_lpm) || !dwc->is_drd) {
		dev_dbg(mdwc->dev, "%s failed!!!\n", __func__);
		return;
	}

	/* guard against concurrent VBUS handling */
	mdwc->in_restart = true;

	if (!mdwc->vbus_active) {
		dev_dbg(mdwc->dev, "%s bailing out in disconnect\n", __func__);
		dwc->err_evt_seen = false;
		mdwc->in_restart = false;
		return;
	}

	dbg_event(0xFF, "RestartUSB", 0);
	/* Reset active USB connection */
	dwc3_resume_work(&mdwc->resume_work);

	/* Make sure disconnect is processed before sending connect */
	while (--timeout && !pm_runtime_suspended(mdwc->dev))
		msleep(20);

	if (!timeout) {
		dev_dbg(mdwc->dev,
			"Not in LPM after disconnect, forcing suspend...\n");
		dbg_event(0xFF, "ReStart:RT SUSP",
			atomic_read(&mdwc->dev->power.usage_count));
		pm_runtime_suspend(mdwc->dev);
	}

	mdwc->in_restart = false;
	/* Force reconnect only if cable is still connected */
	if (mdwc->vbus_active)
		dwc3_resume_work(&mdwc->resume_work);

	dwc->err_evt_seen = false;
	/* Let the state machine settle before the work returns */
	flush_delayed_work(&mdwc->sm_work);
}
1503
1504/*
1505 * Check whether the DWC3 requires resetting the ep
1506 * after going to Low Power Mode (lpm)
1507 */
1508bool msm_dwc3_reset_ep_after_lpm(struct usb_gadget *gadget)
1509{
1510 struct dwc3 *dwc = container_of(gadget, struct dwc3, gadget);
1511 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1512
1513 return dbm_reset_ep_after_lpm(mdwc->dbm);
1514}
1515EXPORT_SYMBOL(msm_dwc3_reset_ep_after_lpm);
1516
1517/*
1518 * Config Global Distributed Switch Controller (GDSC)
1519 * to support controller power collapse
1520 */
1521static int dwc3_msm_config_gdsc(struct dwc3_msm *mdwc, int on)
1522{
1523 int ret;
1524
1525 if (IS_ERR_OR_NULL(mdwc->dwc3_gdsc))
1526 return -EPERM;
1527
1528 if (on) {
1529 ret = regulator_enable(mdwc->dwc3_gdsc);
1530 if (ret) {
1531 dev_err(mdwc->dev, "unable to enable usb3 gdsc\n");
1532 return ret;
1533 }
1534 } else {
1535 ret = regulator_disable(mdwc->dwc3_gdsc);
1536 if (ret) {
1537 dev_err(mdwc->dev, "unable to disable usb3 gdsc\n");
1538 return ret;
1539 }
1540 }
1541
1542 return ret;
1543}
1544
static int dwc3_msm_link_clk_reset(struct dwc3_msm *mdwc, bool assert)
{
	int ret = 0;

	if (assert) {
		/* Quiesce power-event IRQs while clocks are off */
		disable_irq(mdwc->pwr_event_irq);
		/* Using asynchronous block reset to the hardware */
		dev_dbg(mdwc->dev, "block_reset ASSERT\n");
		/* Gate clocks before asserting the core reset */
		clk_disable_unprepare(mdwc->utmi_clk);
		clk_disable_unprepare(mdwc->sleep_clk);
		clk_disable_unprepare(mdwc->core_clk);
		clk_disable_unprepare(mdwc->iface_clk);
		ret = reset_control_assert(mdwc->core_reset);
		if (ret)
			dev_err(mdwc->dev, "dwc3 core_reset assert failed\n");
	} else {
		dev_dbg(mdwc->dev, "block_reset DEASSERT\n");
		ret = reset_control_deassert(mdwc->core_reset);
		if (ret)
			dev_err(mdwc->dev, "dwc3 core_reset deassert failed\n");
		/* Let the reset release propagate before ungating clocks */
		ndelay(200);
		/* Re-enable clocks in reverse order of the disable above */
		clk_prepare_enable(mdwc->iface_clk);
		clk_prepare_enable(mdwc->core_clk);
		clk_prepare_enable(mdwc->sleep_clk);
		clk_prepare_enable(mdwc->utmi_clk);
		enable_irq(mdwc->pwr_event_irq);
	}

	return ret;
}
1575
static void dwc3_msm_update_ref_clk(struct dwc3_msm *mdwc)
{
	u32 guctl, gfladj = 0;

	/* Clear the reference-clock period field before re-programming */
	guctl = dwc3_msm_read_reg(mdwc->base, DWC3_GUCTL);
	guctl &= ~DWC3_GUCTL_REFCLKPER;

	/* GFLADJ register is used starting with revision 2.50a */
	if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) >= DWC3_REVISION_250A) {
		gfladj = dwc3_msm_read_reg(mdwc->base, DWC3_GFLADJ);
		gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
		gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZ_DECR;
		gfladj &= ~DWC3_GFLADJ_REFCLK_LPM_SEL;
		gfladj &= ~DWC3_GFLADJ_REFCLK_FLADJ;
	}

	/* Refer to SNPS Databook Table 6-55 for calculations used */
	switch (mdwc->utmi_clk_rate) {
	case 19200000:
		guctl |= 52 << __ffs(DWC3_GUCTL_REFCLKPER);
		gfladj |= 12 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
		gfladj |= DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
		gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
		gfladj |= 200 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
		break;
	case 24000000:
		guctl |= 41 << __ffs(DWC3_GUCTL_REFCLKPER);
		gfladj |= 10 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
		gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
		gfladj |= 2032 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
		break;
	default:
		dev_warn(mdwc->dev, "Unsupported utmi_clk_rate: %u\n",
				mdwc->utmi_clk_rate);
		break;
	}

	dwc3_msm_write_reg(mdwc->base, DWC3_GUCTL, guctl);
	/* gfladj stays 0 on cores older than 2.50a; skip the write then */
	if (gfladj)
		dwc3_msm_write_reg(mdwc->base, DWC3_GFLADJ, gfladj);
}
1617
/* Initialize QSCRATCH registers for HSPHY and SSPHY operation */
static void dwc3_msm_qscratch_reg_init(struct dwc3_msm *mdwc)
{
	if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) < DWC3_REVISION_250A)
		/* On older cores set XHCI_REV bit to specify revision 1.0 */
		dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
					 BIT(2), 1);

	/*
	 * Enable master clock for RAMs to allow BAM to access RAMs when
	 * RAM clock gating is enabled via DWC3's GCTL. Otherwise issues
	 * are seen where RAM clocks get turned OFF in SS mode
	 */
	dwc3_msm_write_reg(mdwc->base, CGCTL_REG,
		dwc3_msm_read_reg(mdwc->base, CGCTL_REG) | 0x18);

}
1635
Jack Pham4b8b4ae2016-08-09 11:36:34 -07001636static void dwc3_msm_vbus_draw_work(struct work_struct *w)
1637{
1638 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
1639 vbus_draw_work);
1640 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1641
1642 dwc3_msm_gadget_vbus_draw(mdwc, dwc->vbus_draw);
1643}
1644
static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event)
{
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
	struct dwc3_event_buffer *evt;
	u32 reg;
	int i;

	switch (event) {
	case DWC3_CONTROLLER_ERROR_EVENT:
		dev_info(mdwc->dev,
			"DWC3_CONTROLLER_ERROR_EVENT received, irq cnt %lu\n",
			dwc->irq_cnt);

		dwc3_gadget_disable_irq(dwc);

		/* prevent core from generating interrupts until recovery */
		reg = dwc3_msm_read_reg(mdwc->base, DWC3_GCTL);
		reg |= DWC3_GCTL_CORESOFTRESET;
		dwc3_msm_write_reg(mdwc->base, DWC3_GCTL, reg);

		/* restart USB which performs full reset and reconnect */
		schedule_work(&mdwc->restart_usb_work);
		break;
	case DWC3_CONTROLLER_RESET_EVENT:
		dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESET_EVENT received\n");
		/* HS & SSPHYs get reset as part of core soft reset */
		dwc3_msm_qscratch_reg_init(mdwc);
		break;
	case DWC3_CONTROLLER_POST_RESET_EVENT:
		dev_dbg(mdwc->dev,
				"DWC3_CONTROLLER_POST_RESET_EVENT received\n");

		/*
		 * Below sequence is used when controller is working without
		 * having ssphy and only USB high speed is supported.
		 */
		if (dwc->maximum_speed == USB_SPEED_HIGH) {
			/* Disable the PIPE/UTMI clock before switching mux */
			dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
				dwc3_msm_read_reg(mdwc->base,
				QSCRATCH_GENERAL_CFG)
				| PIPE_UTMI_CLK_DIS);

			usleep_range(2, 5);


			/* Select UTMI clock and override pipe3 phystatus */
			dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
				dwc3_msm_read_reg(mdwc->base,
				QSCRATCH_GENERAL_CFG)
				| PIPE_UTMI_CLK_SEL
				| PIPE3_PHYSTATUS_SW);

			usleep_range(2, 5);

			/* Re-enable the clock with the new mux selection */
			dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
				dwc3_msm_read_reg(mdwc->base,
				QSCRATCH_GENERAL_CFG)
				& ~PIPE_UTMI_CLK_DIS);
		}

		dwc3_msm_update_ref_clk(mdwc);
		dwc->tx_fifo_size = mdwc->tx_fifo_size;
		break;
	case DWC3_CONTROLLER_CONNDONE_EVENT:
		dev_dbg(mdwc->dev, "DWC3_CONTROLLER_CONNDONE_EVENT received\n");
		/*
		 * Add power event if the dbm indicates coming out of L1 by
		 * interrupt
		 */
		if (mdwc->dbm && dbm_l1_lpm_interrupt(mdwc->dbm))
			dwc3_msm_write_reg_field(mdwc->base,
					PWR_EVNT_IRQ_MASK_REG,
					PWR_EVNT_LPM_OUT_L1_MASK, 1);

		atomic_set(&dwc->in_lpm, 0);
		break;
	case DWC3_CONTROLLER_NOTIFY_OTG_EVENT:
		dev_dbg(mdwc->dev, "DWC3_CONTROLLER_NOTIFY_OTG_EVENT received\n");
		if (dwc->enable_bus_suspend) {
			mdwc->suspend = dwc->b_suspend;
			queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
		}
		break;
	case DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT:
		dev_dbg(mdwc->dev, "DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT received\n");
		/* May be called in atomic context; defer to a worker */
		schedule_work(&mdwc->vbus_draw_work);
		break;
	case DWC3_CONTROLLER_RESTART_USB_SESSION:
		dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESTART_USB_SESSION received\n");
		schedule_work(&mdwc->restart_usb_work);
		break;
	case DWC3_GSI_EVT_BUF_ALLOC:
		dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_ALLOC\n");

		if (!mdwc->num_gsi_event_buffers)
			break;

		/*
		 * NOTE(review): element size here is sizeof(*dwc->ev_buf)
		 * (a whole event-buffer struct) although gsi_ev_buff looks
		 * like an array of pointers — over-allocates; confirm and
		 * consider sizeof(*mdwc->gsi_ev_buff).
		 */
		mdwc->gsi_ev_buff = devm_kzalloc(dwc->dev,
			sizeof(*dwc->ev_buf) * mdwc->num_gsi_event_buffers,
			GFP_KERNEL);
		if (!mdwc->gsi_ev_buff) {
			dev_err(dwc->dev, "can't allocate gsi_ev_buff\n");
			break;
		}

		for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {

			evt = devm_kzalloc(dwc->dev, sizeof(*evt), GFP_KERNEL);
			if (!evt)
				break;
			evt->dwc = dwc;
			evt->length = DWC3_EVENT_BUFFERS_SIZE;
			evt->buf = dma_alloc_coherent(dwc->dev,
						DWC3_EVENT_BUFFERS_SIZE,
						&evt->dma, GFP_KERNEL);
			if (!evt->buf) {
				dev_err(dwc->dev,
					"can't allocate gsi_evt_buf(%d)\n", i);
				break;
			}
			mdwc->gsi_ev_buff[i] = evt;
		}
		break;
	case DWC3_GSI_EVT_BUF_SETUP:
		/*
		 * NOTE(review): assumes BUF_ALLOC fully succeeded; entries
		 * are dereferenced without a NULL check here.
		 */
		dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_SETUP\n");
		for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
			evt = mdwc->gsi_ev_buff[i];
			dev_dbg(mdwc->dev, "Evt buf %p dma %08llx length %d\n",
				evt->buf, (unsigned long long) evt->dma,
				evt->length);
			memset(evt->buf, 0, evt->length);
			evt->lpos = 0;
			/*
			 * Primary event buffer is programmed with registers
			 * DWC3_GEVNT*(0). Hence use DWC3_GEVNT*(i+1) to
			 * program USB GSI related event buffer with DWC3
			 * controller.
			 */
			dwc3_writel(dwc->regs, DWC3_GEVNTADRLO((i+1)),
				lower_32_bits(evt->dma));
			dwc3_writel(dwc->regs, DWC3_GEVNTADRHI((i+1)),
				DWC3_GEVNTADRHI_EVNTADRHI_GSI_EN(
					DWC3_GEVENT_TYPE_GSI) |
				DWC3_GEVNTADRHI_EVNTADRHI_GSI_IDX((i+1)));
			dwc3_writel(dwc->regs, DWC3_GEVNTSIZ((i+1)),
				DWC3_GEVNTCOUNT_EVNTINTRPTMASK |
				((evt->length) & 0xffff));
			dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT((i+1)), 0);
		}
		break;
	case DWC3_GSI_EVT_BUF_CLEANUP:
		dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_CLEANUP\n");
		for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
			evt = mdwc->gsi_ev_buff[i];
			evt->lpos = 0;
			/*
			 * Primary event buffer is programmed with registers
			 * DWC3_GEVNT*(0). Hence use DWC3_GEVNT*(i+1) to
			 * program USB GSI related event buffer with DWC3
			 * controller.
			 */
			dwc3_writel(dwc->regs, DWC3_GEVNTADRLO((i+1)), 0);
			dwc3_writel(dwc->regs, DWC3_GEVNTADRHI((i+1)), 0);
			dwc3_writel(dwc->regs, DWC3_GEVNTSIZ((i+1)),
					DWC3_GEVNTSIZ_INTMASK |
					DWC3_GEVNTSIZ_SIZE((i+1)));
			dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT((i+1)), 0);
		}
		break;
	case DWC3_GSI_EVT_BUF_FREE:
		dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_FREE\n");
		for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
			evt = mdwc->gsi_ev_buff[i];
			if (evt)
				dma_free_coherent(dwc->dev, evt->length,
							evt->buf, evt->dma);
		}
		break;
	default:
		dev_dbg(mdwc->dev, "unknown dwc3 event\n");
		break;
	}
}
1827
static void dwc3_msm_block_reset(struct dwc3_msm *mdwc, bool core_reset)
{
	int ret = 0;

	if (core_reset) {
		/* Assert, hold, then deassert the link clock reset */
		ret = dwc3_msm_link_clk_reset(mdwc, 1);
		if (ret)
			return;

		usleep_range(1000, 1200);
		ret = dwc3_msm_link_clk_reset(mdwc, 0);
		if (ret)
			return;

		/* Give the core time to come out of reset */
		usleep_range(10000, 12000);
	}

	if (mdwc->dbm) {
		/* Reset the DBM */
		dbm_soft_reset(mdwc->dbm, 1);
		usleep_range(1000, 1200);
		dbm_soft_reset(mdwc->dbm, 0);

		/*enable DBM*/
		dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
			DBM_EN_MASK, 0x1);
		dbm_enable(mdwc->dbm);
	}
}
1857
static void dwc3_msm_power_collapse_por(struct dwc3_msm *mdwc)
{
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	u32 val;
	int ret;

	/* Configure AHB2PHY for one wait state read/write */
	if (mdwc->ahb2phy_base) {
		clk_prepare_enable(mdwc->cfg_ahb_clk);
		val = readl_relaxed(mdwc->ahb2phy_base +
				PERIPH_SS_AHB2PHY_TOP_CFG);
		/* Only write when the register isn't already programmed */
		if (val != ONE_READ_WRITE_WAIT) {
			writel_relaxed(ONE_READ_WRITE_WAIT,
				mdwc->ahb2phy_base + PERIPH_SS_AHB2PHY_TOP_CFG);
			/* complete above write before configuring USB PHY. */
			mb();
		}
		clk_disable_unprepare(mdwc->cfg_ahb_clk);
	}

	/* One-time core pre-init on the first power-on */
	if (!mdwc->init) {
		dbg_event(0xFF, "dwc3 init",
				atomic_read(&mdwc->dev->power.usage_count));
		ret = dwc3_core_pre_init(dwc);
		if (ret) {
			dev_err(mdwc->dev, "dwc3_core_pre_init failed\n");
			return;
		}
		mdwc->init = true;
	}

	dwc3_core_init(dwc);
	/* Re-configure event buffers */
	dwc3_event_buffers_setup(dwc);
}
1893
static int dwc3_msm_prepare_suspend(struct dwc3_msm *mdwc)
{
	unsigned long timeout;
	u32 reg = 0;

	/* SuperSpeed link must already be in P3 unless restarting */
	if ((mdwc->in_host_mode || mdwc->vbus_active)
			&& dwc3_msm_is_superspeed(mdwc) && !mdwc->in_restart) {
		if (!atomic_read(&mdwc->in_p3)) {
			dev_err(mdwc->dev, "Not in P3,aborting LPM sequence\n");
			return -EBUSY;
		}
	}

	/* Clear previous L2 events */
	dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
		PWR_EVNT_LPM_IN_L2_MASK | PWR_EVNT_LPM_OUT_L2_MASK);

	/* Prepare HSPHY for suspend */
	reg = dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0));
	dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
		reg | DWC3_GUSB2PHYCFG_ENBLSLPM | DWC3_GUSB2PHYCFG_SUSPHY);

	/* Wait for PHY to go into L2 (poll up to 5 ms) */
	timeout = jiffies + msecs_to_jiffies(5);
	while (!time_after(jiffies, timeout)) {
		reg = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
		if (reg & PWR_EVNT_LPM_IN_L2_MASK)
			break;
	}
	/* Suspend proceeds anyway; only log the failure */
	if (!(reg & PWR_EVNT_LPM_IN_L2_MASK))
		dev_err(mdwc->dev, "could not transition HS PHY to L2\n");

	/* Clear L2 event bit */
	dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
		PWR_EVNT_LPM_IN_L2_MASK);

	return 0;
}
1932
1933static void dwc3_msm_bus_vote_w(struct work_struct *w)
1934{
1935 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, bus_vote_w);
1936 int ret;
1937
1938 ret = msm_bus_scale_client_update_request(mdwc->bus_perf_client,
1939 mdwc->bus_vote);
1940 if (ret)
1941 dev_err(mdwc->dev, "Failed to reset bus bw vote %d\n", ret);
1942}
1943
1944static void dwc3_set_phy_speed_flags(struct dwc3_msm *mdwc)
1945{
1946 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1947 int i, num_ports;
1948 u32 reg;
1949
1950 mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
1951 if (mdwc->in_host_mode) {
1952 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
1953 num_ports = HCS_MAX_PORTS(reg);
1954 for (i = 0; i < num_ports; i++) {
1955 reg = dwc3_msm_read_reg(mdwc->base,
1956 USB3_PORTSC + i*0x10);
1957 if (reg & PORT_PE) {
1958 if (DEV_HIGHSPEED(reg) || DEV_FULLSPEED(reg))
1959 mdwc->hs_phy->flags |= PHY_HSFS_MODE;
1960 else if (DEV_LOWSPEED(reg))
1961 mdwc->hs_phy->flags |= PHY_LS_MODE;
1962 }
1963 }
1964 } else {
1965 if (dwc->gadget.speed == USB_SPEED_HIGH ||
1966 dwc->gadget.speed == USB_SPEED_FULL)
1967 mdwc->hs_phy->flags |= PHY_HSFS_MODE;
1968 else if (dwc->gadget.speed == USB_SPEED_LOW)
1969 mdwc->hs_phy->flags |= PHY_LS_MODE;
1970 }
1971}
1972
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05301973static void msm_dwc3_perf_vote_update(struct dwc3_msm *mdwc,
1974 bool perf_mode);
Mayank Rana511f3b22016-08-02 12:00:11 -07001975
1976static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
1977{
Mayank Rana83ad5822016-08-09 14:17:22 -07001978 int ret;
Mayank Rana511f3b22016-08-02 12:00:11 -07001979 bool can_suspend_ssphy;
1980 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana83ad5822016-08-09 14:17:22 -07001981 struct dwc3_event_buffer *evt;
Mayank Rana511f3b22016-08-02 12:00:11 -07001982
1983 if (atomic_read(&dwc->in_lpm)) {
1984 dev_dbg(mdwc->dev, "%s: Already suspended\n", __func__);
1985 return 0;
1986 }
1987
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05301988 cancel_delayed_work_sync(&mdwc->perf_vote_work);
1989 msm_dwc3_perf_vote_update(mdwc, false);
1990
Mayank Rana511f3b22016-08-02 12:00:11 -07001991 if (!mdwc->in_host_mode) {
Mayank Rana83ad5822016-08-09 14:17:22 -07001992 evt = dwc->ev_buf;
1993 if ((evt->flags & DWC3_EVENT_PENDING)) {
1994 dev_dbg(mdwc->dev,
Mayank Rana511f3b22016-08-02 12:00:11 -07001995 "%s: %d device events pending, abort suspend\n",
1996 __func__, evt->count / 4);
Mayank Rana83ad5822016-08-09 14:17:22 -07001997 return -EBUSY;
Mayank Rana511f3b22016-08-02 12:00:11 -07001998 }
1999 }
2000
2001 if (!mdwc->vbus_active && dwc->is_drd &&
2002 mdwc->otg_state == OTG_STATE_B_PERIPHERAL) {
2003 /*
2004 * In some cases, the pm_runtime_suspend may be called by
2005 * usb_bam when there is pending lpm flag. However, if this is
2006 * done when cable was disconnected and otg state has not
2007 * yet changed to IDLE, then it means OTG state machine
2008 * is running and we race against it. So cancel LPM for now,
2009 * and OTG state machine will go for LPM later, after completing
2010 * transition to IDLE state.
2011 */
2012 dev_dbg(mdwc->dev,
2013 "%s: cable disconnected while not in idle otg state\n",
2014 __func__);
2015 return -EBUSY;
2016 }
2017
2018 /*
2019 * Check if device is not in CONFIGURED state
2020 * then check controller state of L2 and break
2021 * LPM sequence. Check this for device bus suspend case.
2022 */
2023 if ((dwc->is_drd && mdwc->otg_state == OTG_STATE_B_SUSPEND) &&
2024 (dwc->gadget.state != USB_STATE_CONFIGURED)) {
2025 pr_err("%s(): Trying to go in LPM with state:%d\n",
2026 __func__, dwc->gadget.state);
2027 pr_err("%s(): LPM is not performed.\n", __func__);
2028 return -EBUSY;
2029 }
2030
2031 ret = dwc3_msm_prepare_suspend(mdwc);
2032 if (ret)
2033 return ret;
2034
2035 /* Initialize variables here */
2036 can_suspend_ssphy = !(mdwc->in_host_mode &&
2037 dwc3_msm_is_host_superspeed(mdwc));
2038
2039 /* Disable core irq */
2040 if (dwc->irq)
2041 disable_irq(dwc->irq);
2042
2043 /* disable power event irq, hs and ss phy irq is used as wake up src */
2044 disable_irq(mdwc->pwr_event_irq);
2045
2046 dwc3_set_phy_speed_flags(mdwc);
2047 /* Suspend HS PHY */
2048 usb_phy_set_suspend(mdwc->hs_phy, 1);
2049
2050 /* Suspend SS PHY */
Hemant Kumarde1df692016-04-26 19:36:48 -07002051 if (dwc->maximum_speed == USB_SPEED_SUPER && can_suspend_ssphy) {
Mayank Rana511f3b22016-08-02 12:00:11 -07002052 /* indicate phy about SS mode */
2053 if (dwc3_msm_is_superspeed(mdwc))
2054 mdwc->ss_phy->flags |= DEVICE_IN_SS_MODE;
2055 usb_phy_set_suspend(mdwc->ss_phy, 1);
2056 mdwc->lpm_flags |= MDWC3_SS_PHY_SUSPEND;
2057 }
2058
2059 /* make sure above writes are completed before turning off clocks */
2060 wmb();
2061
2062 /* Disable clocks */
2063 if (mdwc->bus_aggr_clk)
2064 clk_disable_unprepare(mdwc->bus_aggr_clk);
2065 clk_disable_unprepare(mdwc->utmi_clk);
2066
Hemant Kumar633dc332016-08-10 13:41:05 -07002067 /* Memory core: OFF, Memory periphery: OFF */
2068 if (!mdwc->in_host_mode && !mdwc->vbus_active) {
2069 clk_set_flags(mdwc->core_clk, CLKFLAG_NORETAIN_MEM);
2070 clk_set_flags(mdwc->core_clk, CLKFLAG_NORETAIN_PERIPH);
2071 }
2072
Mayank Rana511f3b22016-08-02 12:00:11 -07002073 clk_set_rate(mdwc->core_clk, 19200000);
2074 clk_disable_unprepare(mdwc->core_clk);
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05302075 if (mdwc->noc_aggr_clk)
2076 clk_disable_unprepare(mdwc->noc_aggr_clk);
Mayank Rana511f3b22016-08-02 12:00:11 -07002077 /*
2078 * Disable iface_clk only after core_clk as core_clk has FSM
2079 * depedency on iface_clk. Hence iface_clk should be turned off
2080 * after core_clk is turned off.
2081 */
2082 clk_disable_unprepare(mdwc->iface_clk);
2083 /* USB PHY no more requires TCXO */
2084 clk_disable_unprepare(mdwc->xo_clk);
2085
2086 /* Perform controller power collapse */
2087 if (!mdwc->in_host_mode && !mdwc->vbus_active) {
2088 mdwc->lpm_flags |= MDWC3_POWER_COLLAPSE;
2089 dev_dbg(mdwc->dev, "%s: power collapse\n", __func__);
2090 dwc3_msm_config_gdsc(mdwc, 0);
2091 clk_disable_unprepare(mdwc->sleep_clk);
2092 }
2093
2094 /* Remove bus voting */
2095 if (mdwc->bus_perf_client) {
2096 mdwc->bus_vote = 0;
2097 schedule_work(&mdwc->bus_vote_w);
2098 }
2099
2100 /*
2101 * release wakeup source with timeout to defer system suspend to
2102 * handle case where on USB cable disconnect, SUSPEND and DISCONNECT
2103 * event is received.
2104 */
2105 if (mdwc->lpm_to_suspend_delay) {
2106 dev_dbg(mdwc->dev, "defer suspend with %d(msecs)\n",
2107 mdwc->lpm_to_suspend_delay);
2108 pm_wakeup_event(mdwc->dev, mdwc->lpm_to_suspend_delay);
2109 } else {
2110 pm_relax(mdwc->dev);
2111 }
2112
2113 atomic_set(&dwc->in_lpm, 1);
2114
2115 /*
2116 * with DCP or during cable disconnect, we dont require wakeup
2117 * using HS_PHY_IRQ or SS_PHY_IRQ. Hence enable wakeup only in
2118 * case of host bus suspend and device bus suspend.
2119 */
2120 if (mdwc->vbus_active || mdwc->in_host_mode) {
2121 enable_irq_wake(mdwc->hs_phy_irq);
2122 enable_irq(mdwc->hs_phy_irq);
2123 if (mdwc->ss_phy_irq) {
2124 enable_irq_wake(mdwc->ss_phy_irq);
2125 enable_irq(mdwc->ss_phy_irq);
2126 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002127 mdwc->lpm_flags |= MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
2128 }
2129
2130 dev_info(mdwc->dev, "DWC3 in low power mode\n");
2131 return 0;
2132}
2133
/*
 * dwc3_msm_resume() - bring the controller out of low power mode.
 * @mdwc: glue-layer driver context
 *
 * Mirror of dwc3_msm_suspend(): bus vote and TCXO first, then GDSC/reset
 * recovery if the controller was power-collapsed, clock bring-up (iface
 * before core), PHY resume, and finally IRQ re-arming. Ordering is
 * deliberate and must not be changed.
 *
 * Return: 0 (clock/reset failures are logged but do not abort resume).
 */
static int dwc3_msm_resume(struct dwc3_msm *mdwc)
{
	int ret;
	long core_clk_rate;
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);

	dev_dbg(mdwc->dev, "%s: exiting lpm\n", __func__);

	if (!atomic_read(&dwc->in_lpm)) {
		dev_dbg(mdwc->dev, "%s: Already resumed\n", __func__);
		return 0;
	}

	/* Hold a wakeup source while the controller is active */
	pm_stay_awake(mdwc->dev);

	/* Enable bus voting */
	if (mdwc->bus_perf_client) {
		mdwc->bus_vote = 1;
		schedule_work(&mdwc->bus_vote_w);
	}

	/* Vote for TCXO while waking up USB HSPHY */
	ret = clk_prepare_enable(mdwc->xo_clk);
	if (ret)
		dev_err(mdwc->dev, "%s failed to vote TCXO buffer%d\n",
						__func__, ret);

	/* Exit controller power collapse: GDSC on, then core block reset */
	if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
		dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
		dwc3_msm_config_gdsc(mdwc, 1);
		ret = reset_control_assert(mdwc->core_reset);
		if (ret)
			dev_err(mdwc->dev, "%s:core_reset assert failed\n",
					__func__);
		/* HW requires a short delay for reset to take place properly */
		usleep_range(1000, 1200);
		ret = reset_control_deassert(mdwc->core_reset);
		if (ret)
			dev_err(mdwc->dev, "%s:core_reset deassert failed\n",
					__func__);
		clk_prepare_enable(mdwc->sleep_clk);
	}

	/*
	 * Enable clocks
	 * Turned ON iface_clk before core_clk due to FSM depedency.
	 */
	clk_prepare_enable(mdwc->iface_clk);
	if (mdwc->noc_aggr_clk)
		clk_prepare_enable(mdwc->noc_aggr_clk);

	/* Use the lower HS core clk rate when only an HS device is attached */
	core_clk_rate = mdwc->core_clk_rate;
	if (mdwc->in_host_mode && mdwc->max_rh_port_speed == USB_SPEED_HIGH) {
		core_clk_rate = mdwc->core_clk_rate_hs;
		dev_dbg(mdwc->dev, "%s: set hs core clk rate %ld\n", __func__,
			core_clk_rate);
	}

	clk_set_rate(mdwc->core_clk, core_clk_rate);
	clk_prepare_enable(mdwc->core_clk);

	/* set Memory core: ON, Memory periphery: ON */
	clk_set_flags(mdwc->core_clk, CLKFLAG_RETAIN_MEM);
	clk_set_flags(mdwc->core_clk, CLKFLAG_RETAIN_PERIPH);

	clk_prepare_enable(mdwc->utmi_clk);
	if (mdwc->bus_aggr_clk)
		clk_prepare_enable(mdwc->bus_aggr_clk);

	/* Resume SS PHY, restoring the Type-C lane selected at connect time */
	if (dwc->maximum_speed == USB_SPEED_SUPER &&
			mdwc->lpm_flags & MDWC3_SS_PHY_SUSPEND) {
		mdwc->ss_phy->flags &= ~(PHY_LANE_A | PHY_LANE_B);
		if (mdwc->typec_orientation == ORIENTATION_CC1)
			mdwc->ss_phy->flags |= PHY_LANE_A;
		if (mdwc->typec_orientation == ORIENTATION_CC2)
			mdwc->ss_phy->flags |= PHY_LANE_B;
		usb_phy_set_suspend(mdwc->ss_phy, 0);
		mdwc->ss_phy->flags &= ~DEVICE_IN_SS_MODE;
		mdwc->lpm_flags &= ~MDWC3_SS_PHY_SUSPEND;
	}

	mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
	/* Resume HS PHY */
	usb_phy_set_suspend(mdwc->hs_phy, 0);

	/* Recover from controller power collapse */
	if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
		u32 tmp;

		dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);

		dwc3_msm_power_collapse_por(mdwc);

		/* Get initial P3 status and enable IN_P3 event */
		tmp = dwc3_msm_read_reg_field(mdwc->base,
			DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
		atomic_set(&mdwc->in_p3, tmp == DWC3_LINK_STATE_U3);
		dwc3_msm_write_reg_field(mdwc->base, PWR_EVNT_IRQ_MASK_REG,
					PWR_EVNT_POWERDOWN_IN_P3_MASK, 1);

		mdwc->lpm_flags &= ~MDWC3_POWER_COLLAPSE;
	}

	atomic_set(&dwc->in_lpm, 0);

	/* enable power evt irq for IN P3 detection */
	enable_irq(mdwc->pwr_event_irq);

	/* Disable HSPHY auto suspend */
	dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
		dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0)) &
				~(DWC3_GUSB2PHYCFG_ENBLSLPM |
					DWC3_GUSB2PHYCFG_SUSPHY));

	/* Disable wakeup capable for HS_PHY IRQ & SS_PHY_IRQ if enabled */
	if (mdwc->lpm_flags & MDWC3_ASYNC_IRQ_WAKE_CAPABILITY) {
		disable_irq_wake(mdwc->hs_phy_irq);
		disable_irq_nosync(mdwc->hs_phy_irq);
		if (mdwc->ss_phy_irq) {
			disable_irq_wake(mdwc->ss_phy_irq);
			disable_irq_nosync(mdwc->ss_phy_irq);
		}
		mdwc->lpm_flags &= ~MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
	}

	dev_info(mdwc->dev, "DWC3 exited from low power mode\n");

	/* Enable core irq */
	if (dwc->irq)
		enable_irq(dwc->irq);

	/*
	 * Handle other power events that could not have been handled during
	 * Low Power Mode
	 */
	dwc3_pwr_event_handler(mdwc);

	/* Restart periodic perf voting if a QoS request is outstanding */
	if (pm_qos_request_active(&mdwc->pm_qos_req_dma))
		schedule_delayed_work(&mdwc->perf_vote_work,
			msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));

	dbg_event(0xFF, "Ctl Res", atomic_read(&dwc->in_lpm));
	return 0;
}
2280
2281/**
2282 * dwc3_ext_event_notify - callback to handle events from external transceiver
2283 *
2284 * Returns 0 on success
2285 */
2286static void dwc3_ext_event_notify(struct dwc3_msm *mdwc)
2287{
2288 /* Flush processing any pending events before handling new ones */
2289 flush_delayed_work(&mdwc->sm_work);
2290
2291 if (mdwc->id_state == DWC3_ID_FLOAT) {
2292 dev_dbg(mdwc->dev, "XCVR: ID set\n");
2293 set_bit(ID, &mdwc->inputs);
2294 } else {
2295 dev_dbg(mdwc->dev, "XCVR: ID clear\n");
2296 clear_bit(ID, &mdwc->inputs);
2297 }
2298
2299 if (mdwc->vbus_active && !mdwc->in_restart) {
2300 dev_dbg(mdwc->dev, "XCVR: BSV set\n");
2301 set_bit(B_SESS_VLD, &mdwc->inputs);
2302 } else {
2303 dev_dbg(mdwc->dev, "XCVR: BSV clear\n");
2304 clear_bit(B_SESS_VLD, &mdwc->inputs);
2305 }
2306
2307 if (mdwc->suspend) {
2308 dev_dbg(mdwc->dev, "XCVR: SUSP set\n");
2309 set_bit(B_SUSPEND, &mdwc->inputs);
2310 } else {
2311 dev_dbg(mdwc->dev, "XCVR: SUSP clear\n");
2312 clear_bit(B_SUSPEND, &mdwc->inputs);
2313 }
2314
2315 schedule_delayed_work(&mdwc->sm_work, 0);
2316}
2317
2318static void dwc3_resume_work(struct work_struct *w)
2319{
2320 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, resume_work);
Mayank Rana08e41922017-03-02 15:25:48 -08002321 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07002322
2323 dev_dbg(mdwc->dev, "%s: dwc3 resume work\n", __func__);
2324
2325 /*
2326 * exit LPM first to meet resume timeline from device side.
2327 * resume_pending flag would prevent calling
2328 * dwc3_msm_resume() in case we are here due to system
2329 * wide resume without usb cable connected. This flag is set
2330 * only in case of power event irq in lpm.
2331 */
2332 if (mdwc->resume_pending) {
2333 dwc3_msm_resume(mdwc);
2334 mdwc->resume_pending = false;
2335 }
2336
Mayank Rana08e41922017-03-02 15:25:48 -08002337 if (atomic_read(&mdwc->pm_suspended)) {
2338 dbg_event(0xFF, "RWrk PMSus", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07002339 /* let pm resume kick in resume work later */
2340 return;
Mayank Rana08e41922017-03-02 15:25:48 -08002341 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002342 dwc3_ext_event_notify(mdwc);
2343}
2344
2345static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc)
2346{
2347 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2348 u32 irq_stat, irq_clear = 0;
2349
2350 irq_stat = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
2351 dev_dbg(mdwc->dev, "%s irq_stat=%X\n", __func__, irq_stat);
2352
2353 /* Check for P3 events */
2354 if ((irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) &&
2355 (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK)) {
2356 /* Can't tell if entered or exit P3, so check LINKSTATE */
2357 u32 ls = dwc3_msm_read_reg_field(mdwc->base,
2358 DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
2359 dev_dbg(mdwc->dev, "%s link state = 0x%04x\n", __func__, ls);
2360 atomic_set(&mdwc->in_p3, ls == DWC3_LINK_STATE_U3);
2361
2362 irq_stat &= ~(PWR_EVNT_POWERDOWN_OUT_P3_MASK |
2363 PWR_EVNT_POWERDOWN_IN_P3_MASK);
2364 irq_clear |= (PWR_EVNT_POWERDOWN_OUT_P3_MASK |
2365 PWR_EVNT_POWERDOWN_IN_P3_MASK);
2366 } else if (irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) {
2367 atomic_set(&mdwc->in_p3, 0);
2368 irq_stat &= ~PWR_EVNT_POWERDOWN_OUT_P3_MASK;
2369 irq_clear |= PWR_EVNT_POWERDOWN_OUT_P3_MASK;
2370 } else if (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK) {
2371 atomic_set(&mdwc->in_p3, 1);
2372 irq_stat &= ~PWR_EVNT_POWERDOWN_IN_P3_MASK;
2373 irq_clear |= PWR_EVNT_POWERDOWN_IN_P3_MASK;
2374 }
2375
2376 /* Clear L2 exit */
2377 if (irq_stat & PWR_EVNT_LPM_OUT_L2_MASK) {
2378 irq_stat &= ~PWR_EVNT_LPM_OUT_L2_MASK;
2379 irq_stat |= PWR_EVNT_LPM_OUT_L2_MASK;
2380 }
2381
2382 /* Handle exit from L1 events */
2383 if (irq_stat & PWR_EVNT_LPM_OUT_L1_MASK) {
2384 dev_dbg(mdwc->dev, "%s: handling PWR_EVNT_LPM_OUT_L1_MASK\n",
2385 __func__);
2386 if (usb_gadget_wakeup(&dwc->gadget))
2387 dev_err(mdwc->dev, "%s failed to take dwc out of L1\n",
2388 __func__);
2389 irq_stat &= ~PWR_EVNT_LPM_OUT_L1_MASK;
2390 irq_clear |= PWR_EVNT_LPM_OUT_L1_MASK;
2391 }
2392
2393 /* Unhandled events */
2394 if (irq_stat)
2395 dev_dbg(mdwc->dev, "%s: unexpected PWR_EVNT, irq_stat=%X\n",
2396 __func__, irq_stat);
2397
2398 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG, irq_clear);
2399}
2400
2401static irqreturn_t msm_dwc3_pwr_irq_thread(int irq, void *_mdwc)
2402{
2403 struct dwc3_msm *mdwc = _mdwc;
2404 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2405
2406 dev_dbg(mdwc->dev, "%s\n", __func__);
2407
2408 if (atomic_read(&dwc->in_lpm))
2409 dwc3_resume_work(&mdwc->resume_work);
2410 else
2411 dwc3_pwr_event_handler(mdwc);
2412
Mayank Rana08e41922017-03-02 15:25:48 -08002413 dbg_event(0xFF, "PWR IRQ", atomic_read(&dwc->in_lpm));
Mayank Rana511f3b22016-08-02 12:00:11 -07002414 return IRQ_HANDLED;
2415}
2416
2417static irqreturn_t msm_dwc3_pwr_irq(int irq, void *data)
2418{
2419 struct dwc3_msm *mdwc = data;
2420 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2421
2422 dwc->t_pwr_evt_irq = ktime_get();
2423 dev_dbg(mdwc->dev, "%s received\n", __func__);
2424 /*
2425 * When in Low Power Mode, can't read PWR_EVNT_IRQ_STAT_REG to acertain
2426 * which interrupts have been triggered, as the clocks are disabled.
2427 * Resume controller by waking up pwr event irq thread.After re-enabling
2428 * clocks, dwc3_msm_resume will call dwc3_pwr_event_handler to handle
2429 * all other power events.
2430 */
2431 if (atomic_read(&dwc->in_lpm)) {
2432 /* set this to call dwc3_msm_resume() */
2433 mdwc->resume_pending = true;
2434 return IRQ_WAKE_THREAD;
2435 }
2436
2437 dwc3_pwr_event_handler(mdwc);
2438 return IRQ_HANDLED;
2439}
2440
2441static int dwc3_cpu_notifier_cb(struct notifier_block *nfb,
2442 unsigned long action, void *hcpu)
2443{
2444 uint32_t cpu = (uintptr_t)hcpu;
2445 struct dwc3_msm *mdwc =
2446 container_of(nfb, struct dwc3_msm, dwc3_cpu_notifier);
2447
2448 if (cpu == cpu_to_affin && action == CPU_ONLINE) {
2449 pr_debug("%s: cpu online:%u irq:%d\n", __func__,
2450 cpu_to_affin, mdwc->irq_to_affin);
2451 irq_set_affinity(mdwc->irq_to_affin, get_cpu_mask(cpu));
2452 }
2453
2454 return NOTIFY_OK;
2455}
2456
2457static void dwc3_otg_sm_work(struct work_struct *w);
2458
/*
 * dwc3_msm_get_clk_gdsc() - acquire all clocks, resets and the GDSC at probe.
 * @mdwc: glue-layer driver context
 *
 * All handles are devm-managed, so error paths simply return; nothing needs
 * explicit release. Optional resources (GDSC, bus_aggr_clk, noc_aggr_clk,
 * cfg_ahb_clk, qcom,core-clk-rate-hs) are tolerated when absent; everything
 * else is mandatory.
 *
 * Return: 0 on success, or a negative errno from the first mandatory
 * resource that could not be obtained.
 */
static int dwc3_msm_get_clk_gdsc(struct dwc3_msm *mdwc)
{
	int ret;

	/* GDSC is optional: some targets keep the USB rail always on */
	mdwc->dwc3_gdsc = devm_regulator_get(mdwc->dev, "USB3_GDSC");
	if (IS_ERR(mdwc->dwc3_gdsc))
		mdwc->dwc3_gdsc = NULL;

	mdwc->xo_clk = devm_clk_get(mdwc->dev, "xo");
	if (IS_ERR(mdwc->xo_clk)) {
		dev_err(mdwc->dev, "%s unable to get TCXO buffer handle\n",
				__func__);
		ret = PTR_ERR(mdwc->xo_clk);
		return ret;
	}
	clk_set_rate(mdwc->xo_clk, 19200000);

	mdwc->iface_clk = devm_clk_get(mdwc->dev, "iface_clk");
	if (IS_ERR(mdwc->iface_clk)) {
		dev_err(mdwc->dev, "failed to get iface_clk\n");
		ret = PTR_ERR(mdwc->iface_clk);
		return ret;
	}

	/*
	 * DWC3 Core requires its CORE CLK (aka master / bus clk) to
	 * run at 125Mhz in SSUSB mode and >60MHZ for HSUSB mode.
	 * On newer platform it can run at 150MHz as well.
	 */
	mdwc->core_clk = devm_clk_get(mdwc->dev, "core_clk");
	if (IS_ERR(mdwc->core_clk)) {
		dev_err(mdwc->dev, "failed to get core_clk\n");
		ret = PTR_ERR(mdwc->core_clk);
		return ret;
	}

	mdwc->core_reset = devm_reset_control_get(mdwc->dev, "core_reset");
	if (IS_ERR(mdwc->core_reset)) {
		dev_err(mdwc->dev, "failed to get core_reset\n");
		return PTR_ERR(mdwc->core_reset);
	}

	/* The core clk rate comes from DT and is mandatory */
	if (of_property_read_u32(mdwc->dev->of_node, "qcom,core-clk-rate",
				(u32 *)&mdwc->core_clk_rate)) {
		dev_err(mdwc->dev, "USB core-clk-rate is not present\n");
		return -EINVAL;
	}

	/* Snap the DT rate to the nearest rate the clock tree supports */
	mdwc->core_clk_rate = clk_round_rate(mdwc->core_clk,
			mdwc->core_clk_rate);
	dev_dbg(mdwc->dev, "USB core frequency = %ld\n",
			mdwc->core_clk_rate);
	ret = clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
	if (ret)
		dev_err(mdwc->dev, "fail to set core_clk freq:%d\n", ret);

	/* Optional lower rate used while only an HS device is connected */
	if (of_property_read_u32(mdwc->dev->of_node, "qcom,core-clk-rate-hs",
				(u32 *)&mdwc->core_clk_rate_hs)) {
		dev_dbg(mdwc->dev, "USB core-clk-rate-hs is not present\n");
		mdwc->core_clk_rate_hs = mdwc->core_clk_rate;
	}

	mdwc->sleep_clk = devm_clk_get(mdwc->dev, "sleep_clk");
	if (IS_ERR(mdwc->sleep_clk)) {
		dev_err(mdwc->dev, "failed to get sleep_clk\n");
		ret = PTR_ERR(mdwc->sleep_clk);
		return ret;
	}

	clk_set_rate(mdwc->sleep_clk, 32000);
	mdwc->utmi_clk_rate = 19200000;
	mdwc->utmi_clk = devm_clk_get(mdwc->dev, "utmi_clk");
	if (IS_ERR(mdwc->utmi_clk)) {
		dev_err(mdwc->dev, "failed to get utmi_clk\n");
		ret = PTR_ERR(mdwc->utmi_clk);
		return ret;
	}

	clk_set_rate(mdwc->utmi_clk, mdwc->utmi_clk_rate);
	/* bus/noc aggregation clocks are optional on some targets */
	mdwc->bus_aggr_clk = devm_clk_get(mdwc->dev, "bus_aggr_clk");
	if (IS_ERR(mdwc->bus_aggr_clk))
		mdwc->bus_aggr_clk = NULL;

	mdwc->noc_aggr_clk = devm_clk_get(mdwc->dev, "noc_aggr_clk");
	if (IS_ERR(mdwc->noc_aggr_clk))
		mdwc->noc_aggr_clk = NULL;

	/* cfg_ahb_clk is mandatory only when DT declares it */
	if (of_property_match_string(mdwc->dev->of_node,
				"clock-names", "cfg_ahb_clk") >= 0) {
		mdwc->cfg_ahb_clk = devm_clk_get(mdwc->dev, "cfg_ahb_clk");
		if (IS_ERR(mdwc->cfg_ahb_clk)) {
			ret = PTR_ERR(mdwc->cfg_ahb_clk);
			mdwc->cfg_ahb_clk = NULL;
			if (ret != -EPROBE_DEFER)
				dev_err(mdwc->dev,
					"failed to get cfg_ahb_clk ret %d\n",
					ret);
			return ret;
		}
	}

	return 0;
}
2562
2563static int dwc3_msm_id_notifier(struct notifier_block *nb,
2564 unsigned long event, void *ptr)
2565{
2566 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, id_nb);
Hemant Kumarde1df692016-04-26 19:36:48 -07002567 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07002568 struct extcon_dev *edev = ptr;
2569 enum dwc3_id_state id;
2570 int cc_state;
Hemant Kumarde1df692016-04-26 19:36:48 -07002571 int speed;
Mayank Rana511f3b22016-08-02 12:00:11 -07002572
2573 if (!edev) {
2574 dev_err(mdwc->dev, "%s: edev null\n", __func__);
2575 goto done;
2576 }
2577
2578 id = event ? DWC3_ID_GROUND : DWC3_ID_FLOAT;
2579
2580 dev_dbg(mdwc->dev, "host:%ld (id:%d) event received\n", event, id);
2581
2582 cc_state = extcon_get_cable_state_(edev, EXTCON_USB_CC);
2583 if (cc_state < 0)
2584 mdwc->typec_orientation = ORIENTATION_NONE;
2585 else
2586 mdwc->typec_orientation =
2587 cc_state ? ORIENTATION_CC2 : ORIENTATION_CC1;
2588
Mayank Rana08e41922017-03-02 15:25:48 -08002589 dbg_event(0xFF, "cc_state", mdwc->typec_orientation);
Hemant Kumarde1df692016-04-26 19:36:48 -07002590
2591 speed = extcon_get_cable_state_(edev, EXTCON_USB_SPEED);
2592 dwc->maximum_speed = (speed == 0) ? USB_SPEED_HIGH : USB_SPEED_SUPER;
Vamsi Krishna Samavedam86ed20b2017-01-31 13:55:38 -08002593 if (dwc->maximum_speed > dwc->max_hw_supp_speed)
2594 dwc->maximum_speed = dwc->max_hw_supp_speed;
Hemant Kumarde1df692016-04-26 19:36:48 -07002595
Mayank Rana511f3b22016-08-02 12:00:11 -07002596 if (mdwc->id_state != id) {
2597 mdwc->id_state = id;
Mayank Rana08e41922017-03-02 15:25:48 -08002598 dbg_event(0xFF, "id_state", mdwc->id_state);
Mayank Rana511f3b22016-08-02 12:00:11 -07002599 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
2600 }
2601
2602done:
2603 return NOTIFY_DONE;
2604}
2605
2606static int dwc3_msm_vbus_notifier(struct notifier_block *nb,
2607 unsigned long event, void *ptr)
2608{
2609 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, vbus_nb);
2610 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2611 struct extcon_dev *edev = ptr;
2612 int cc_state;
Hemant Kumarde1df692016-04-26 19:36:48 -07002613 int speed;
Mayank Rana511f3b22016-08-02 12:00:11 -07002614
2615 if (!edev) {
2616 dev_err(mdwc->dev, "%s: edev null\n", __func__);
2617 goto done;
2618 }
2619
2620 dev_dbg(mdwc->dev, "vbus:%ld event received\n", event);
2621
2622 if (mdwc->vbus_active == event)
2623 return NOTIFY_DONE;
2624
2625 cc_state = extcon_get_cable_state_(edev, EXTCON_USB_CC);
2626 if (cc_state < 0)
2627 mdwc->typec_orientation = ORIENTATION_NONE;
2628 else
2629 mdwc->typec_orientation =
2630 cc_state ? ORIENTATION_CC2 : ORIENTATION_CC1;
2631
Mayank Rana08e41922017-03-02 15:25:48 -08002632 dbg_event(0xFF, "cc_state", mdwc->typec_orientation);
Hemant Kumarde1df692016-04-26 19:36:48 -07002633
2634 speed = extcon_get_cable_state_(edev, EXTCON_USB_SPEED);
2635 dwc->maximum_speed = (speed == 0) ? USB_SPEED_HIGH : USB_SPEED_SUPER;
Vamsi Krishna Samavedam86ed20b2017-01-31 13:55:38 -08002636 if (dwc->maximum_speed > dwc->max_hw_supp_speed)
2637 dwc->maximum_speed = dwc->max_hw_supp_speed;
Hemant Kumarde1df692016-04-26 19:36:48 -07002638
Mayank Rana511f3b22016-08-02 12:00:11 -07002639 mdwc->vbus_active = event;
Mayank Rana83ad5822016-08-09 14:17:22 -07002640 if (dwc->is_drd && !mdwc->in_restart)
Mayank Rana511f3b22016-08-02 12:00:11 -07002641 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07002642done:
2643 return NOTIFY_DONE;
2644}
2645
/*
 * dwc3_msm_extcon_register() - hook up extcon notifiers for VBUS and ID.
 * @mdwc: glue-layer driver context
 *
 * The "extcon" DT property may carry one or two phandles: the first edev
 * supplies EXTCON_USB (VBUS) events, the second — or the first, when only
 * one is given — supplies EXTCON_USB_HOST (ID) events. An absent property
 * or an -ENODEV edev is not an error (extcon is optional).
 *
 * Return: 0 on success or when extcon is absent, negative errno otherwise.
 * On failure the already-registered VBUS notifier is unregistered.
 */
static int dwc3_msm_extcon_register(struct dwc3_msm *mdwc)
{
	struct device_node *node = mdwc->dev->of_node;
	struct extcon_dev *edev;
	int ret = 0;

	if (!of_property_read_bool(node, "extcon"))
		return 0;

	edev = extcon_get_edev_by_phandle(mdwc->dev, 0);
	if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV)
		return PTR_ERR(edev);

	if (!IS_ERR(edev)) {
		mdwc->extcon_vbus = edev;
		mdwc->vbus_nb.notifier_call = dwc3_msm_vbus_notifier;
		ret = extcon_register_notifier(edev, EXTCON_USB,
				&mdwc->vbus_nb);
		if (ret < 0) {
			dev_err(mdwc->dev, "failed to register notifier for USB\n");
			return ret;
		}
	}

	/* if a second phandle was provided, use it to get a separate edev */
	if (of_count_phandle_with_args(node, "extcon", NULL) > 1) {
		edev = extcon_get_edev_by_phandle(mdwc->dev, 1);
		if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) {
			ret = PTR_ERR(edev);
			goto err;
		}
	}

	/* edev here is either the second phandle's device or the first's */
	if (!IS_ERR(edev)) {
		mdwc->extcon_id = edev;
		mdwc->id_nb.notifier_call = dwc3_msm_id_notifier;
		ret = extcon_register_notifier(edev, EXTCON_USB_HOST,
				&mdwc->id_nb);
		if (ret < 0) {
			dev_err(mdwc->dev, "failed to register notifier for USB-HOST\n");
			goto err;
		}
	}

	return 0;
err:
	if (mdwc->extcon_vbus)
		extcon_unregister_notifier(mdwc->extcon_vbus, EXTCON_USB,
				&mdwc->vbus_nb);
	return ret;
}
2697
2698static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
2699 char *buf)
2700{
2701 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
2702
2703 if (mdwc->vbus_active)
2704 return snprintf(buf, PAGE_SIZE, "peripheral\n");
2705 if (mdwc->id_state == DWC3_ID_GROUND)
2706 return snprintf(buf, PAGE_SIZE, "host\n");
2707
2708 return snprintf(buf, PAGE_SIZE, "none\n");
2709}
2710
2711static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
2712 const char *buf, size_t count)
2713{
2714 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
2715
2716 if (sysfs_streq(buf, "peripheral")) {
2717 mdwc->vbus_active = true;
2718 mdwc->id_state = DWC3_ID_FLOAT;
2719 } else if (sysfs_streq(buf, "host")) {
2720 mdwc->vbus_active = false;
2721 mdwc->id_state = DWC3_ID_GROUND;
2722 } else {
2723 mdwc->vbus_active = false;
2724 mdwc->id_state = DWC3_ID_FLOAT;
2725 }
2726
2727 dwc3_ext_event_notify(mdwc);
2728
2729 return count;
2730}
2731
2732static DEVICE_ATTR_RW(mode);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05302733static void msm_dwc3_perf_vote_work(struct work_struct *w);
Mayank Rana511f3b22016-08-02 12:00:11 -07002734
Vamsi Krishna Samavedam17f26db2017-01-31 17:21:23 -08002735static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
2736 char *buf)
2737{
2738 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
2739 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2740
2741 return snprintf(buf, PAGE_SIZE, "%s\n",
2742 usb_speed_string(dwc->max_hw_supp_speed));
2743}
2744
2745static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
2746 const char *buf, size_t count)
2747{
2748 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
2749 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2750 enum usb_device_speed req_speed = USB_SPEED_UNKNOWN;
2751
2752 if (sysfs_streq(buf, "high"))
2753 req_speed = USB_SPEED_HIGH;
2754 else if (sysfs_streq(buf, "super"))
2755 req_speed = USB_SPEED_SUPER;
2756
2757 if (req_speed != USB_SPEED_UNKNOWN &&
2758 req_speed != dwc->max_hw_supp_speed) {
2759 dwc->maximum_speed = dwc->max_hw_supp_speed = req_speed;
2760 schedule_work(&mdwc->restart_usb_work);
2761 }
2762
2763 return count;
2764}
2765static DEVICE_ATTR_RW(speed);
2766
Mayank Rana511f3b22016-08-02 12:00:11 -07002767static int dwc3_msm_probe(struct platform_device *pdev)
2768{
2769 struct device_node *node = pdev->dev.of_node, *dwc3_node;
2770 struct device *dev = &pdev->dev;
2771 struct dwc3_msm *mdwc;
2772 struct dwc3 *dwc;
2773 struct resource *res;
2774 void __iomem *tcsr;
2775 bool host_mode;
2776 int ret = 0;
2777 int ext_hub_reset_gpio;
2778 u32 val;
2779
2780 mdwc = devm_kzalloc(&pdev->dev, sizeof(*mdwc), GFP_KERNEL);
2781 if (!mdwc)
2782 return -ENOMEM;
2783
2784 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
2785 dev_err(&pdev->dev, "setting DMA mask to 64 failed.\n");
2786 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
2787 dev_err(&pdev->dev, "setting DMA mask to 32 failed.\n");
2788 return -EOPNOTSUPP;
2789 }
2790 }
2791
2792 platform_set_drvdata(pdev, mdwc);
2793 mdwc->dev = &pdev->dev;
2794
2795 INIT_LIST_HEAD(&mdwc->req_complete_list);
2796 INIT_WORK(&mdwc->resume_work, dwc3_resume_work);
2797 INIT_WORK(&mdwc->restart_usb_work, dwc3_restart_usb_work);
2798 INIT_WORK(&mdwc->bus_vote_w, dwc3_msm_bus_vote_w);
Jack Pham4b8b4ae2016-08-09 11:36:34 -07002799 INIT_WORK(&mdwc->vbus_draw_work, dwc3_msm_vbus_draw_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07002800 INIT_DELAYED_WORK(&mdwc->sm_work, dwc3_otg_sm_work);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05302801 INIT_DELAYED_WORK(&mdwc->perf_vote_work, msm_dwc3_perf_vote_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07002802
2803 mdwc->dwc3_wq = alloc_ordered_workqueue("dwc3_wq", 0);
2804 if (!mdwc->dwc3_wq) {
2805 pr_err("%s: Unable to create workqueue dwc3_wq\n", __func__);
2806 return -ENOMEM;
2807 }
2808
2809 /* Get all clks and gdsc reference */
2810 ret = dwc3_msm_get_clk_gdsc(mdwc);
2811 if (ret) {
2812 dev_err(&pdev->dev, "error getting clock or gdsc.\n");
2813 return ret;
2814 }
2815
2816 mdwc->id_state = DWC3_ID_FLOAT;
2817 set_bit(ID, &mdwc->inputs);
2818
2819 mdwc->charging_disabled = of_property_read_bool(node,
2820 "qcom,charging-disabled");
2821
2822 ret = of_property_read_u32(node, "qcom,lpm-to-suspend-delay-ms",
2823 &mdwc->lpm_to_suspend_delay);
2824 if (ret) {
2825 dev_dbg(&pdev->dev, "setting lpm_to_suspend_delay to zero.\n");
2826 mdwc->lpm_to_suspend_delay = 0;
2827 }
2828
2829 /*
2830 * DWC3 has separate IRQ line for OTG events (ID/BSV) and for
2831 * DP and DM linestate transitions during low power mode.
2832 */
2833 mdwc->hs_phy_irq = platform_get_irq_byname(pdev, "hs_phy_irq");
2834 if (mdwc->hs_phy_irq < 0) {
2835 dev_err(&pdev->dev, "pget_irq for hs_phy_irq failed\n");
2836 ret = -EINVAL;
2837 goto err;
2838 } else {
2839 irq_set_status_flags(mdwc->hs_phy_irq, IRQ_NOAUTOEN);
2840 ret = devm_request_threaded_irq(&pdev->dev, mdwc->hs_phy_irq,
2841 msm_dwc3_pwr_irq,
2842 msm_dwc3_pwr_irq_thread,
2843 IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME
2844 | IRQF_ONESHOT, "hs_phy_irq", mdwc);
2845 if (ret) {
2846 dev_err(&pdev->dev, "irqreq hs_phy_irq failed: %d\n",
2847 ret);
2848 goto err;
2849 }
2850 }
2851
2852 mdwc->ss_phy_irq = platform_get_irq_byname(pdev, "ss_phy_irq");
2853 if (mdwc->ss_phy_irq < 0) {
2854 dev_dbg(&pdev->dev, "pget_irq for ss_phy_irq failed\n");
2855 } else {
2856 irq_set_status_flags(mdwc->ss_phy_irq, IRQ_NOAUTOEN);
2857 ret = devm_request_threaded_irq(&pdev->dev, mdwc->ss_phy_irq,
2858 msm_dwc3_pwr_irq,
2859 msm_dwc3_pwr_irq_thread,
2860 IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME
2861 | IRQF_ONESHOT, "ss_phy_irq", mdwc);
2862 if (ret) {
2863 dev_err(&pdev->dev, "irqreq ss_phy_irq failed: %d\n",
2864 ret);
2865 goto err;
2866 }
2867 }
2868
2869 mdwc->pwr_event_irq = platform_get_irq_byname(pdev, "pwr_event_irq");
2870 if (mdwc->pwr_event_irq < 0) {
2871 dev_err(&pdev->dev, "pget_irq for pwr_event_irq failed\n");
2872 ret = -EINVAL;
2873 goto err;
2874 } else {
2875 /* will be enabled in dwc3_msm_resume() */
2876 irq_set_status_flags(mdwc->pwr_event_irq, IRQ_NOAUTOEN);
2877 ret = devm_request_threaded_irq(&pdev->dev, mdwc->pwr_event_irq,
2878 msm_dwc3_pwr_irq,
2879 msm_dwc3_pwr_irq_thread,
2880 IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME,
2881 "msm_dwc3", mdwc);
2882 if (ret) {
2883 dev_err(&pdev->dev, "irqreq pwr_event_irq failed: %d\n",
2884 ret);
2885 goto err;
2886 }
2887 }
2888
2889 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcsr_base");
2890 if (!res) {
2891 dev_dbg(&pdev->dev, "missing TCSR memory resource\n");
2892 } else {
2893 tcsr = devm_ioremap_nocache(&pdev->dev, res->start,
2894 resource_size(res));
2895 if (IS_ERR_OR_NULL(tcsr)) {
2896 dev_dbg(&pdev->dev, "tcsr ioremap failed\n");
2897 } else {
2898 /* Enable USB3 on the primary USB port. */
2899 writel_relaxed(0x1, tcsr);
2900 /*
2901 * Ensure that TCSR write is completed before
2902 * USB registers initialization.
2903 */
2904 mb();
2905 }
2906 }
2907
2908 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core_base");
2909 if (!res) {
2910 dev_err(&pdev->dev, "missing memory base resource\n");
2911 ret = -ENODEV;
2912 goto err;
2913 }
2914
2915 mdwc->base = devm_ioremap_nocache(&pdev->dev, res->start,
2916 resource_size(res));
2917 if (!mdwc->base) {
2918 dev_err(&pdev->dev, "ioremap failed\n");
2919 ret = -ENODEV;
2920 goto err;
2921 }
2922
2923 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2924 "ahb2phy_base");
2925 if (res) {
2926 mdwc->ahb2phy_base = devm_ioremap_nocache(&pdev->dev,
2927 res->start, resource_size(res));
2928 if (IS_ERR_OR_NULL(mdwc->ahb2phy_base)) {
2929 dev_err(dev, "couldn't find ahb2phy_base addr.\n");
2930 mdwc->ahb2phy_base = NULL;
2931 } else {
2932 /*
2933 * On some targets cfg_ahb_clk depends upon usb gdsc
2934 * regulator. If cfg_ahb_clk is enabled without
2935 * turning on usb gdsc regulator clk is stuck off.
2936 */
2937 dwc3_msm_config_gdsc(mdwc, 1);
2938 clk_prepare_enable(mdwc->cfg_ahb_clk);
2939 /* Configure AHB2PHY for one wait state read/write*/
2940 val = readl_relaxed(mdwc->ahb2phy_base +
2941 PERIPH_SS_AHB2PHY_TOP_CFG);
2942 if (val != ONE_READ_WRITE_WAIT) {
2943 writel_relaxed(ONE_READ_WRITE_WAIT,
2944 mdwc->ahb2phy_base +
2945 PERIPH_SS_AHB2PHY_TOP_CFG);
2946 /* complete above write before using USB PHY */
2947 mb();
2948 }
2949 clk_disable_unprepare(mdwc->cfg_ahb_clk);
2950 dwc3_msm_config_gdsc(mdwc, 0);
2951 }
2952 }
2953
2954 if (of_get_property(pdev->dev.of_node, "qcom,usb-dbm", NULL)) {
2955 mdwc->dbm = usb_get_dbm_by_phandle(&pdev->dev, "qcom,usb-dbm");
2956 if (IS_ERR(mdwc->dbm)) {
2957 dev_err(&pdev->dev, "unable to get dbm device\n");
2958 ret = -EPROBE_DEFER;
2959 goto err;
2960 }
2961 /*
2962 * Add power event if the dbm indicates coming out of L1
2963 * by interrupt
2964 */
2965 if (dbm_l1_lpm_interrupt(mdwc->dbm)) {
2966 if (!mdwc->pwr_event_irq) {
2967 dev_err(&pdev->dev,
2968 "need pwr_event_irq exiting L1\n");
2969 ret = -EINVAL;
2970 goto err;
2971 }
2972 }
2973 }
2974
2975 ext_hub_reset_gpio = of_get_named_gpio(node,
2976 "qcom,ext-hub-reset-gpio", 0);
2977
2978 if (gpio_is_valid(ext_hub_reset_gpio)
2979 && (!devm_gpio_request(&pdev->dev, ext_hub_reset_gpio,
2980 "qcom,ext-hub-reset-gpio"))) {
2981 /* reset external hub */
2982 gpio_direction_output(ext_hub_reset_gpio, 1);
2983 /*
2984 * Hub reset should be asserted for minimum 5microsec
2985 * before deasserting.
2986 */
2987 usleep_range(5, 1000);
2988 gpio_direction_output(ext_hub_reset_gpio, 0);
2989 }
2990
2991 if (of_property_read_u32(node, "qcom,dwc-usb3-msm-tx-fifo-size",
2992 &mdwc->tx_fifo_size))
2993 dev_err(&pdev->dev,
2994 "unable to read platform data tx fifo size\n");
2995
2996 mdwc->disable_host_mode_pm = of_property_read_bool(node,
2997 "qcom,disable-host-mode-pm");
2998
2999 dwc3_set_notifier(&dwc3_msm_notify_event);
3000
3001 /* Assumes dwc3 is the first DT child of dwc3-msm */
3002 dwc3_node = of_get_next_available_child(node, NULL);
3003 if (!dwc3_node) {
3004 dev_err(&pdev->dev, "failed to find dwc3 child\n");
3005 ret = -ENODEV;
3006 goto err;
3007 }
3008
3009 ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
3010 if (ret) {
3011 dev_err(&pdev->dev,
3012 "failed to add create dwc3 core\n");
3013 of_node_put(dwc3_node);
3014 goto err;
3015 }
3016
3017 mdwc->dwc3 = of_find_device_by_node(dwc3_node);
3018 of_node_put(dwc3_node);
3019 if (!mdwc->dwc3) {
3020 dev_err(&pdev->dev, "failed to get dwc3 platform device\n");
3021 goto put_dwc3;
3022 }
3023
3024 mdwc->hs_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
3025 "usb-phy", 0);
3026 if (IS_ERR(mdwc->hs_phy)) {
3027 dev_err(&pdev->dev, "unable to get hsphy device\n");
3028 ret = PTR_ERR(mdwc->hs_phy);
3029 goto put_dwc3;
3030 }
3031 mdwc->ss_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
3032 "usb-phy", 1);
3033 if (IS_ERR(mdwc->ss_phy)) {
3034 dev_err(&pdev->dev, "unable to get ssphy device\n");
3035 ret = PTR_ERR(mdwc->ss_phy);
3036 goto put_dwc3;
3037 }
3038
3039 mdwc->bus_scale_table = msm_bus_cl_get_pdata(pdev);
3040 if (mdwc->bus_scale_table) {
3041 mdwc->bus_perf_client =
3042 msm_bus_scale_register_client(mdwc->bus_scale_table);
3043 }
3044
3045 dwc = platform_get_drvdata(mdwc->dwc3);
3046 if (!dwc) {
3047 dev_err(&pdev->dev, "Failed to get dwc3 device\n");
3048 goto put_dwc3;
3049 }
3050
3051 mdwc->irq_to_affin = platform_get_irq(mdwc->dwc3, 0);
3052 mdwc->dwc3_cpu_notifier.notifier_call = dwc3_cpu_notifier_cb;
3053
3054 if (cpu_to_affin)
3055 register_cpu_notifier(&mdwc->dwc3_cpu_notifier);
3056
Mayank Ranaf4918d32016-12-15 13:35:55 -08003057 ret = of_property_read_u32(node, "qcom,num-gsi-evt-buffs",
3058 &mdwc->num_gsi_event_buffers);
3059
Mayank Rana511f3b22016-08-02 12:00:11 -07003060 /*
3061 * Clocks and regulators will not be turned on until the first time
3062 * runtime PM resume is called. This is to allow for booting up with
3063 * charger already connected so as not to disturb PHY line states.
3064 */
3065 mdwc->lpm_flags = MDWC3_POWER_COLLAPSE | MDWC3_SS_PHY_SUSPEND;
3066 atomic_set(&dwc->in_lpm, 1);
3067 pm_runtime_set_suspended(mdwc->dev);
3068 pm_runtime_set_autosuspend_delay(mdwc->dev, 1000);
3069 pm_runtime_use_autosuspend(mdwc->dev);
3070 pm_runtime_enable(mdwc->dev);
3071 device_init_wakeup(mdwc->dev, 1);
3072
3073 if (of_property_read_bool(node, "qcom,disable-dev-mode-pm"))
3074 pm_runtime_get_noresume(mdwc->dev);
3075
3076 ret = dwc3_msm_extcon_register(mdwc);
3077 if (ret)
3078 goto put_dwc3;
3079
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303080 ret = of_property_read_u32(node, "qcom,pm-qos-latency",
3081 &mdwc->pm_qos_latency);
3082 if (ret) {
3083 dev_dbg(&pdev->dev, "setting pm-qos-latency to zero.\n");
3084 mdwc->pm_qos_latency = 0;
3085 }
3086
Mayank Rana511f3b22016-08-02 12:00:11 -07003087 /* Update initial VBUS/ID state from extcon */
3088 if (mdwc->extcon_vbus && extcon_get_cable_state_(mdwc->extcon_vbus,
3089 EXTCON_USB))
3090 dwc3_msm_vbus_notifier(&mdwc->vbus_nb, true, mdwc->extcon_vbus);
3091 if (mdwc->extcon_id && extcon_get_cable_state_(mdwc->extcon_id,
3092 EXTCON_USB_HOST))
3093 dwc3_msm_id_notifier(&mdwc->id_nb, true, mdwc->extcon_id);
3094
3095 device_create_file(&pdev->dev, &dev_attr_mode);
Vamsi Krishna Samavedam17f26db2017-01-31 17:21:23 -08003096 device_create_file(&pdev->dev, &dev_attr_speed);
Mayank Rana511f3b22016-08-02 12:00:11 -07003097
3098 schedule_delayed_work(&mdwc->sm_work, 0);
3099
3100 host_mode = usb_get_dr_mode(&mdwc->dwc3->dev) == USB_DR_MODE_HOST;
3101 if (!dwc->is_drd && host_mode) {
3102 dev_dbg(&pdev->dev, "DWC3 in host only mode\n");
3103 mdwc->id_state = DWC3_ID_GROUND;
3104 dwc3_ext_event_notify(mdwc);
3105 }
3106
3107 return 0;
3108
3109put_dwc3:
3110 platform_device_put(mdwc->dwc3);
3111 if (mdwc->bus_perf_client)
3112 msm_bus_scale_unregister_client(mdwc->bus_perf_client);
3113err:
3114 return ret;
3115}
3116
/* device_for_each_child() callback: unregister one child device. */
static int dwc3_msm_remove_children(struct device *dev, void *data)
{
	device_unregister(dev);

	return 0;
}
3122
3123static int dwc3_msm_remove(struct platform_device *pdev)
3124{
3125 struct dwc3_msm *mdwc = platform_get_drvdata(pdev);
Mayank Rana08e41922017-03-02 15:25:48 -08003126 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07003127 int ret_pm;
3128
3129 device_remove_file(&pdev->dev, &dev_attr_mode);
3130
3131 if (cpu_to_affin)
3132 unregister_cpu_notifier(&mdwc->dwc3_cpu_notifier);
3133
3134 /*
3135 * In case of system suspend, pm_runtime_get_sync fails.
3136 * Hence turn ON the clocks manually.
3137 */
3138 ret_pm = pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003139 dbg_event(0xFF, "Remov gsyn", ret_pm);
Mayank Rana511f3b22016-08-02 12:00:11 -07003140 if (ret_pm < 0) {
3141 dev_err(mdwc->dev,
3142 "pm_runtime_get_sync failed with %d\n", ret_pm);
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05303143 if (mdwc->noc_aggr_clk)
3144 clk_prepare_enable(mdwc->noc_aggr_clk);
Mayank Rana511f3b22016-08-02 12:00:11 -07003145 clk_prepare_enable(mdwc->utmi_clk);
3146 clk_prepare_enable(mdwc->core_clk);
3147 clk_prepare_enable(mdwc->iface_clk);
3148 clk_prepare_enable(mdwc->sleep_clk);
3149 if (mdwc->bus_aggr_clk)
3150 clk_prepare_enable(mdwc->bus_aggr_clk);
3151 clk_prepare_enable(mdwc->xo_clk);
3152 }
3153
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303154 cancel_delayed_work_sync(&mdwc->perf_vote_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07003155 cancel_delayed_work_sync(&mdwc->sm_work);
3156
3157 if (mdwc->hs_phy)
3158 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3159 platform_device_put(mdwc->dwc3);
3160 device_for_each_child(&pdev->dev, NULL, dwc3_msm_remove_children);
3161
Mayank Rana08e41922017-03-02 15:25:48 -08003162 dbg_event(0xFF, "Remov put", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003163 pm_runtime_disable(mdwc->dev);
3164 pm_runtime_barrier(mdwc->dev);
3165 pm_runtime_put_sync(mdwc->dev);
3166 pm_runtime_set_suspended(mdwc->dev);
3167 device_wakeup_disable(mdwc->dev);
3168
3169 if (mdwc->bus_perf_client)
3170 msm_bus_scale_unregister_client(mdwc->bus_perf_client);
3171
3172 if (!IS_ERR_OR_NULL(mdwc->vbus_reg))
3173 regulator_disable(mdwc->vbus_reg);
3174
3175 disable_irq(mdwc->hs_phy_irq);
3176 if (mdwc->ss_phy_irq)
3177 disable_irq(mdwc->ss_phy_irq);
3178 disable_irq(mdwc->pwr_event_irq);
3179
3180 clk_disable_unprepare(mdwc->utmi_clk);
3181 clk_set_rate(mdwc->core_clk, 19200000);
3182 clk_disable_unprepare(mdwc->core_clk);
3183 clk_disable_unprepare(mdwc->iface_clk);
3184 clk_disable_unprepare(mdwc->sleep_clk);
3185 clk_disable_unprepare(mdwc->xo_clk);
3186 clk_put(mdwc->xo_clk);
3187
3188 dwc3_msm_config_gdsc(mdwc, 0);
3189
3190 return 0;
3191}
3192
/*
 * dwc3_msm_host_notifier() - USB core notifier used while in host mode.
 *
 * Reacts to USB_DEVICE_ADD/USB_DEVICE_REMOVE for devices attached directly
 * to our root hub: on attach it lowers the core clock to the HS rate when
 * the SS root-hub port is idle and forwards the peripheral's bMaxPower to
 * the "usb" power supply so the PMIC can size its VBUS boost; on detach it
 * restores the default clock rate and clears the boost-current vote.
 */
static int dwc3_msm_host_notifier(struct notifier_block *nb,
		unsigned long event, void *ptr)
{
	struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, host_nb);
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	struct usb_device *udev = ptr;
	union power_supply_propval pval;
	unsigned int max_power;

	if (event != USB_DEVICE_ADD && event != USB_DEVICE_REMOVE)
		return NOTIFY_DONE;

	/* Lazily resolve the "usb" power supply; it may register after us */
	if (!mdwc->usb_psy) {
		mdwc->usb_psy = power_supply_get_by_name("usb");
		if (!mdwc->usb_psy)
			return NOTIFY_DONE;
	}

	/*
	 * For direct-attach devices, new udev is direct child of root hub
	 * i.e. dwc -> xhci -> root_hub -> udev
	 * root_hub's udev->parent==NULL, so traverse struct device hierarchy
	 */
	if (udev->parent && !udev->parent->parent &&
			udev->dev.parent->parent == &dwc->xhci->dev) {
		if (event == USB_DEVICE_ADD && udev->actconfig) {
			if (!dwc3_msm_is_ss_rhport_connected(mdwc)) {
				/*
				 * Core clock rate can be reduced only if root
				 * hub SS port is not enabled/connected.
				 */
				clk_set_rate(mdwc->core_clk,
						mdwc->core_clk_rate_hs);
				dev_dbg(mdwc->dev,
					"set hs core clk rate %ld\n",
					mdwc->core_clk_rate_hs);
				mdwc->max_rh_port_speed = USB_SPEED_HIGH;
			} else {
				mdwc->max_rh_port_speed = USB_SPEED_SUPER;
			}

			/* bMaxPower units: 8 mA per unit for SS, 2 mA below */
			if (udev->speed >= USB_SPEED_SUPER)
				max_power = udev->actconfig->desc.bMaxPower * 8;
			else
				max_power = udev->actconfig->desc.bMaxPower * 2;
			dev_dbg(mdwc->dev, "%s configured bMaxPower:%d (mA)\n",
					dev_name(&udev->dev), max_power);

			/* inform PMIC of max power so it can optimize boost */
			pval.intval = max_power * 1000;
			power_supply_set_property(mdwc->usb_psy,
					POWER_SUPPLY_PROP_BOOST_CURRENT, &pval);
		} else {
			/* device removed: zero the boost-current vote */
			pval.intval = 0;
			power_supply_set_property(mdwc->usb_psy,
					POWER_SUPPLY_PROP_BOOST_CURRENT, &pval);

			/* set rate back to default core clk rate */
			clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
			dev_dbg(mdwc->dev, "set core clk rate %ld\n",
				mdwc->core_clk_rate);
			mdwc->max_rh_port_speed = USB_SPEED_UNKNOWN;
		}
	}

	return NOTIFY_DONE;
}
3260
/*
 * msm_dwc3_perf_vote_update() - apply or drop the CPU/DMA latency PM QoS vote.
 * @mdwc:	glue driver context
 * @perf_mode:	true to request the low-latency (performance) vote
 *
 * Votes mdwc->pm_qos_latency when entering perf mode and restores
 * PM_QOS_DEFAULT_VALUE when leaving it.  A zero pm_qos_latency (property
 * absent in DT) disables voting entirely.
 *
 * NOTE(review): curr_perf_mode is a function-local static, so this state is
 * shared across all dwc3_msm instances - fine for a single controller;
 * confirm if multiple instances can ever be probed.
 */
static void msm_dwc3_perf_vote_update(struct dwc3_msm *mdwc, bool perf_mode)
{
	static bool curr_perf_mode;
	int latency = mdwc->pm_qos_latency;

	/* No-op when the vote is unchanged or voting is disabled via DT */
	if ((curr_perf_mode == perf_mode) || !latency)
		return;

	if (perf_mode)
		pm_qos_update_request(&mdwc->pm_qos_req_dma, latency);
	else
		pm_qos_update_request(&mdwc->pm_qos_req_dma,
						PM_QOS_DEFAULT_VALUE);

	curr_perf_mode = perf_mode;
	pr_debug("%s: latency updated to: %d\n", __func__,
			perf_mode ? latency : PM_QOS_DEFAULT_VALUE);
}
3279
/*
 * msm_dwc3_perf_vote_work() - periodic sampler driving the PM QoS vote.
 *
 * Runs every PM_QOS_SAMPLE_SEC seconds while a session is active; if the
 * controller raised at least PM_QOS_THRESHOLD interrupts during the last
 * sample window the low-latency vote is applied, otherwise it is dropped.
 * The work requeues itself.
 *
 * NOTE(review): last_irq_cnt is a function-local static shared across
 * instances and is not reset between sessions - confirm single-instance
 * usage and that a stale count across sessions is acceptable.
 */
static void msm_dwc3_perf_vote_work(struct work_struct *w)
{
	struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
						perf_vote_work.work);
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	static unsigned long last_irq_cnt;
	bool in_perf_mode = false;

	if (dwc->irq_cnt - last_irq_cnt >= PM_QOS_THRESHOLD)
		in_perf_mode = true;

	pr_debug("%s: in_perf_mode:%u, interrupts in last sample:%lu\n",
		 __func__, in_perf_mode, (dwc->irq_cnt - last_irq_cnt));

	last_irq_cnt = dwc->irq_cnt;
	msm_dwc3_perf_vote_update(mdwc, in_perf_mode);
	/* re-arm for the next sampling window */
	schedule_delayed_work(&mdwc->perf_vote_work,
			msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
}
3299
Mayank Rana511f3b22016-08-02 12:00:11 -07003300#define VBUS_REG_CHECK_DELAY (msecs_to_jiffies(1000))
3301
/**
 * dwc3_otg_start_host - helper function for starting/stopping the host
 * controller driver.
 *
 * @mdwc: Pointer to the dwc3_msm structure.
 * @on: start / stop the host controller driver.
 *
 * On start: flags the PHYs for host mode, resumes the glue, enables the
 * VBUS regulator, switches the core to host, registers the USB notifier
 * and adds the xHCI platform device.  On stop the sequence is reversed
 * and a block reset is performed so peripheral mode works afterwards.
 *
 * Returns 0 on success otherwise negative errno.
 */
static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
{
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	int ret = 0;

	if (!dwc->xhci)
		return -EINVAL;

	/*
	 * The vbus_reg pointer could have multiple values
	 * NULL: regulator_get() hasn't been called, or was previously deferred
	 * IS_ERR: regulator could not be obtained, so skip using it
	 * Valid pointer otherwise
	 */
	if (!mdwc->vbus_reg) {
		mdwc->vbus_reg = devm_regulator_get_optional(mdwc->dev,
				"vbus_dwc3");
		if (IS_ERR(mdwc->vbus_reg) &&
				PTR_ERR(mdwc->vbus_reg) == -EPROBE_DEFER) {
			/* regulators may not be ready, so retry again later */
			mdwc->vbus_reg = NULL;
			return -EPROBE_DEFER;
		}
	}

	if (on) {
		dev_dbg(mdwc->dev, "%s: turn on host\n", __func__);

		mdwc->hs_phy->flags |= PHY_HOST_MODE;
		if (dwc->maximum_speed == USB_SPEED_SUPER)
			mdwc->ss_phy->flags |= PHY_HOST_MODE;

		/* hold the glue awake for the whole bring-up sequence */
		pm_runtime_get_sync(mdwc->dev);
		dbg_event(0xFF, "StrtHost gync",
			atomic_read(&mdwc->dev->power.usage_count));
		usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
		if (!IS_ERR(mdwc->vbus_reg))
			ret = regulator_enable(mdwc->vbus_reg);
		if (ret) {
			dev_err(mdwc->dev, "unable to enable vbus_reg\n");
			mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
			mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
			pm_runtime_put_sync(mdwc->dev);
			dbg_event(0xFF, "vregerr psync",
				atomic_read(&mdwc->dev->power.usage_count));
			return ret;
		}

		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);

		mdwc->host_nb.notifier_call = dwc3_msm_host_notifier;
		usb_register_notify(&mdwc->host_nb);

		/*
		 * FIXME If micro A cable is disconnected during system suspend,
		 * xhci platform device will be removed before runtime pm is
		 * enabled for xhci device. Due to this, disable_depth becomes
		 * greater than one and runtimepm is not enabled for next microA
		 * connect. Fix this by calling pm_runtime_init for xhci device.
		 */
		pm_runtime_init(&dwc->xhci->dev);
		ret = platform_device_add(dwc->xhci);
		if (ret) {
			dev_err(mdwc->dev,
				"%s: failed to add XHCI pdev ret=%d\n",
				__func__, ret);
			/* unwind in reverse order of the bring-up above */
			if (!IS_ERR(mdwc->vbus_reg))
				regulator_disable(mdwc->vbus_reg);
			mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
			mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
			pm_runtime_put_sync(mdwc->dev);
			dbg_event(0xFF, "pdeverr psync",
				atomic_read(&mdwc->dev->power.usage_count));
			usb_unregister_notify(&mdwc->host_nb);
			return ret;
		}

		/*
		 * In some cases it is observed that USB PHY is not going into
		 * suspend with host mode suspend functionality. Hence disable
		 * XHCI's runtime PM here if disable_host_mode_pm is set.
		 */
		if (mdwc->disable_host_mode_pm)
			pm_runtime_disable(&dwc->xhci->dev);

		mdwc->in_host_mode = true;
		dwc3_usb3_phy_suspend(dwc, true);

		/* xHCI should have incremented child count as necessary */
		dbg_event(0xFF, "StrtHost psync",
			atomic_read(&mdwc->dev->power.usage_count));
		pm_runtime_mark_last_busy(mdwc->dev);
		pm_runtime_put_sync_autosuspend(mdwc->dev);
#ifdef CONFIG_SMP
		/* pin the QoS request to the controller's IRQ on SMP */
		mdwc->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
		mdwc->pm_qos_req_dma.irq = dwc->irq;
#endif
		pm_qos_add_request(&mdwc->pm_qos_req_dma,
				PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
		/* start in perf mode for better performance initially */
		msm_dwc3_perf_vote_update(mdwc, true);
		schedule_delayed_work(&mdwc->perf_vote_work,
				msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
	} else {
		dev_dbg(mdwc->dev, "%s: turn off host\n", __func__);

		if (!IS_ERR(mdwc->vbus_reg))
			ret = regulator_disable(mdwc->vbus_reg);
		if (ret) {
			dev_err(mdwc->dev, "unable to disable vbus_reg\n");
			return ret;
		}

		cancel_delayed_work_sync(&mdwc->perf_vote_work);
		msm_dwc3_perf_vote_update(mdwc, false);
		pm_qos_remove_request(&mdwc->pm_qos_req_dma);

		pm_runtime_get_sync(mdwc->dev);
		dbg_event(0xFF, "StopHost gsync",
			atomic_read(&mdwc->dev->power.usage_count));
		usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
		mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
		mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
		platform_device_del(dwc->xhci);
		usb_unregister_notify(&mdwc->host_nb);

		/*
		 * Perform USB hardware RESET (both core reset and DBM reset)
		 * when moving from host to peripheral. This is required for
		 * peripheral mode to work.
		 */
		dwc3_msm_block_reset(mdwc, true);

		dwc3_usb3_phy_suspend(dwc, false);
		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);

		mdwc->in_host_mode = false;

		/* re-init core and OTG registers as block reset clears these */
		dwc3_post_host_reset_core_init(dwc);
		pm_runtime_mark_last_busy(mdwc->dev);
		pm_runtime_put_sync_autosuspend(mdwc->dev);
		dbg_event(0xFF, "StopHost psync",
			atomic_read(&mdwc->dev->power.usage_count));
	}

	return 0;
}
3459
3460static void dwc3_override_vbus_status(struct dwc3_msm *mdwc, bool vbus_present)
3461{
3462 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3463
3464 /* Update OTG VBUS Valid from HSPHY to controller */
3465 dwc3_msm_write_readback(mdwc->base, HS_PHY_CTRL_REG,
3466 vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL :
3467 UTMI_OTG_VBUS_VALID,
3468 vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL : 0);
3469
3470 /* Update only if Super Speed is supported */
3471 if (dwc->maximum_speed == USB_SPEED_SUPER) {
3472 /* Update VBUS Valid from SSPHY to controller */
3473 dwc3_msm_write_readback(mdwc->base, SS_PHY_CTRL_REG,
3474 LANE0_PWR_PRESENT,
3475 vbus_present ? LANE0_PWR_PRESENT : 0);
3476 }
3477}
3478
/**
 * dwc3_otg_start_peripheral - bind/unbind the peripheral controller.
 *
 * @mdwc: Pointer to the dwc3_msm structure.
 * @on: Turn ON/OFF the gadget.
 *
 * On start: overrides VBUS-valid, notifies both PHYs of connect, performs
 * a DBM-only reset, switches the core to device mode, connects the gadget
 * and arms the PM QoS perf vote.  On stop the sequence is reversed.
 *
 * Returns 0 on success otherwise negative errno.
 */
static int dwc3_otg_start_peripheral(struct dwc3_msm *mdwc, int on)
{
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);

	/* keep the glue resumed across the whole switch */
	pm_runtime_get_sync(mdwc->dev);
	dbg_event(0xFF, "StrtGdgt gsync",
		atomic_read(&mdwc->dev->power.usage_count));

	if (on) {
		dev_dbg(mdwc->dev, "%s: turn on gadget %s\n",
					__func__, dwc->gadget.name);

		dwc3_override_vbus_status(mdwc, true);
		usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
		usb_phy_notify_connect(mdwc->ss_phy, USB_SPEED_SUPER);

		/*
		 * Core reset is not required during start peripheral. Only
		 * DBM reset is required, hence perform only DBM reset here.
		 */
		dwc3_msm_block_reset(mdwc, false);

		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
		usb_gadget_vbus_connect(&dwc->gadget);
#ifdef CONFIG_SMP
		/* pin the QoS request to the controller's IRQ on SMP */
		mdwc->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
		mdwc->pm_qos_req_dma.irq = dwc->irq;
#endif
		pm_qos_add_request(&mdwc->pm_qos_req_dma,
				PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
		/* start in perf mode for better performance initially */
		msm_dwc3_perf_vote_update(mdwc, true);
		schedule_delayed_work(&mdwc->perf_vote_work,
				msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
	} else {
		dev_dbg(mdwc->dev, "%s: turn off gadget %s\n",
					__func__, dwc->gadget.name);
		cancel_delayed_work_sync(&mdwc->perf_vote_work);
		msm_dwc3_perf_vote_update(mdwc, false);
		pm_qos_remove_request(&mdwc->pm_qos_req_dma);

		usb_gadget_vbus_disconnect(&dwc->gadget);
		usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
		usb_phy_notify_disconnect(mdwc->ss_phy, USB_SPEED_SUPER);
		dwc3_override_vbus_status(mdwc, false);
		dwc3_usb3_phy_suspend(dwc, false);
	}

	pm_runtime_put_sync(mdwc->dev);
	dbg_event(0xFF, "StopGdgt psync",
		atomic_read(&mdwc->dev->power.usage_count));

	return 0;
}
3541
3542static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA)
3543{
Jack Pham8caff352016-08-19 16:33:55 -07003544 union power_supply_propval pval = {0};
Jack Phamd72bafe2016-08-09 11:07:22 -07003545 int ret;
Mayank Rana511f3b22016-08-02 12:00:11 -07003546
3547 if (mdwc->charging_disabled)
3548 return 0;
3549
3550 if (mdwc->max_power == mA)
3551 return 0;
3552
3553 if (!mdwc->usb_psy) {
3554 mdwc->usb_psy = power_supply_get_by_name("usb");
3555 if (!mdwc->usb_psy) {
3556 dev_warn(mdwc->dev, "Could not get usb power_supply\n");
3557 return -ENODEV;
3558 }
3559 }
3560
Jack Pham8caff352016-08-19 16:33:55 -07003561 power_supply_get_property(mdwc->usb_psy, POWER_SUPPLY_PROP_TYPE, &pval);
3562 if (pval.intval != POWER_SUPPLY_TYPE_USB)
3563 return 0;
3564
Mayank Rana511f3b22016-08-02 12:00:11 -07003565 dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA);
3566
Mayank Rana511f3b22016-08-02 12:00:11 -07003567 /* Set max current limit in uA */
Jack Pham8caff352016-08-19 16:33:55 -07003568 pval.intval = 1000 * mA;
Jack Phamd72bafe2016-08-09 11:07:22 -07003569 ret = power_supply_set_property(mdwc->usb_psy,
3570 POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
3571 if (ret) {
3572 dev_dbg(mdwc->dev, "power supply error when setting property\n");
3573 return ret;
3574 }
Mayank Rana511f3b22016-08-02 12:00:11 -07003575
3576 mdwc->max_power = mA;
3577 return 0;
Mayank Rana511f3b22016-08-02 12:00:11 -07003578}
3579
3580
3581/**
3582 * dwc3_otg_sm_work - workqueue function.
3583 *
3584 * @w: Pointer to the dwc3 otg workqueue
3585 *
3586 * NOTE: After any change in otg_state, we must reschdule the state machine.
3587 */
3588static void dwc3_otg_sm_work(struct work_struct *w)
3589{
3590 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, sm_work.work);
3591 struct dwc3 *dwc = NULL;
3592 bool work = 0;
3593 int ret = 0;
3594 unsigned long delay = 0;
3595 const char *state;
3596
3597 if (mdwc->dwc3)
3598 dwc = platform_get_drvdata(mdwc->dwc3);
3599
3600 if (!dwc) {
3601 dev_err(mdwc->dev, "dwc is NULL.\n");
3602 return;
3603 }
3604
3605 state = usb_otg_state_string(mdwc->otg_state);
3606 dev_dbg(mdwc->dev, "%s state\n", state);
Mayank Rana08e41922017-03-02 15:25:48 -08003607 dbg_event(0xFF, state, 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003608
3609 /* Check OTG state */
3610 switch (mdwc->otg_state) {
3611 case OTG_STATE_UNDEFINED:
3612 /* Do nothing if no cable connected */
3613 if (test_bit(ID, &mdwc->inputs) &&
3614 !test_bit(B_SESS_VLD, &mdwc->inputs))
3615 break;
3616
Mayank Rana08e41922017-03-02 15:25:48 -08003617 dbg_event(0xFF, "Exit UNDEF", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003618 mdwc->otg_state = OTG_STATE_B_IDLE;
3619 /* fall-through */
3620 case OTG_STATE_B_IDLE:
3621 if (!test_bit(ID, &mdwc->inputs)) {
3622 dev_dbg(mdwc->dev, "!id\n");
3623 mdwc->otg_state = OTG_STATE_A_IDLE;
3624 work = 1;
3625 } else if (test_bit(B_SESS_VLD, &mdwc->inputs)) {
3626 dev_dbg(mdwc->dev, "b_sess_vld\n");
3627 /*
3628 * Increment pm usage count upon cable connect. Count
3629 * is decremented in OTG_STATE_B_PERIPHERAL state on
3630 * cable disconnect or in bus suspend.
3631 */
3632 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003633 dbg_event(0xFF, "BIDLE gsync",
3634 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003635 dwc3_otg_start_peripheral(mdwc, 1);
3636 mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
3637 work = 1;
3638 } else {
3639 dwc3_msm_gadget_vbus_draw(mdwc, 0);
3640 dev_dbg(mdwc->dev, "Cable disconnected\n");
3641 }
3642 break;
3643
3644 case OTG_STATE_B_PERIPHERAL:
3645 if (!test_bit(B_SESS_VLD, &mdwc->inputs) ||
3646 !test_bit(ID, &mdwc->inputs)) {
3647 dev_dbg(mdwc->dev, "!id || !bsv\n");
3648 mdwc->otg_state = OTG_STATE_B_IDLE;
3649 dwc3_otg_start_peripheral(mdwc, 0);
3650 /*
3651 * Decrement pm usage count upon cable disconnect
3652 * which was incremented upon cable connect in
3653 * OTG_STATE_B_IDLE state
3654 */
3655 pm_runtime_put_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003656 dbg_event(0xFF, "!BSV psync",
3657 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003658 work = 1;
3659 } else if (test_bit(B_SUSPEND, &mdwc->inputs) &&
3660 test_bit(B_SESS_VLD, &mdwc->inputs)) {
3661 dev_dbg(mdwc->dev, "BPER bsv && susp\n");
3662 mdwc->otg_state = OTG_STATE_B_SUSPEND;
3663 /*
3664 * Decrement pm usage count upon bus suspend.
3665 * Count was incremented either upon cable
3666 * connect in OTG_STATE_B_IDLE or host
3667 * initiated resume after bus suspend in
3668 * OTG_STATE_B_SUSPEND state
3669 */
3670 pm_runtime_mark_last_busy(mdwc->dev);
3671 pm_runtime_put_autosuspend(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003672 dbg_event(0xFF, "SUSP put",
3673 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003674 }
3675 break;
3676
3677 case OTG_STATE_B_SUSPEND:
3678 if (!test_bit(B_SESS_VLD, &mdwc->inputs)) {
3679 dev_dbg(mdwc->dev, "BSUSP: !bsv\n");
3680 mdwc->otg_state = OTG_STATE_B_IDLE;
3681 dwc3_otg_start_peripheral(mdwc, 0);
3682 } else if (!test_bit(B_SUSPEND, &mdwc->inputs)) {
3683 dev_dbg(mdwc->dev, "BSUSP !susp\n");
3684 mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
3685 /*
3686 * Increment pm usage count upon host
3687 * initiated resume. Count was decremented
3688 * upon bus suspend in
3689 * OTG_STATE_B_PERIPHERAL state.
3690 */
3691 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003692 dbg_event(0xFF, "!SUSP gsync",
3693 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003694 }
3695 break;
3696
3697 case OTG_STATE_A_IDLE:
3698 /* Switch to A-Device*/
3699 if (test_bit(ID, &mdwc->inputs)) {
3700 dev_dbg(mdwc->dev, "id\n");
3701 mdwc->otg_state = OTG_STATE_B_IDLE;
3702 mdwc->vbus_retry_count = 0;
3703 work = 1;
3704 } else {
3705 mdwc->otg_state = OTG_STATE_A_HOST;
3706 ret = dwc3_otg_start_host(mdwc, 1);
3707 if ((ret == -EPROBE_DEFER) &&
3708 mdwc->vbus_retry_count < 3) {
3709 /*
3710 * Get regulator failed as regulator driver is
3711 * not up yet. Will try to start host after 1sec
3712 */
3713 mdwc->otg_state = OTG_STATE_A_IDLE;
3714 dev_dbg(mdwc->dev, "Unable to get vbus regulator. Retrying...\n");
3715 delay = VBUS_REG_CHECK_DELAY;
3716 work = 1;
3717 mdwc->vbus_retry_count++;
3718 } else if (ret) {
3719 dev_err(mdwc->dev, "unable to start host\n");
3720 mdwc->otg_state = OTG_STATE_A_IDLE;
3721 goto ret;
3722 }
3723 }
3724 break;
3725
3726 case OTG_STATE_A_HOST:
3727 if (test_bit(ID, &mdwc->inputs)) {
3728 dev_dbg(mdwc->dev, "id\n");
3729 dwc3_otg_start_host(mdwc, 0);
3730 mdwc->otg_state = OTG_STATE_B_IDLE;
3731 mdwc->vbus_retry_count = 0;
3732 work = 1;
3733 } else {
3734 dev_dbg(mdwc->dev, "still in a_host state. Resuming root hub.\n");
Mayank Rana08e41922017-03-02 15:25:48 -08003735 dbg_event(0xFF, "XHCIResume", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003736 if (dwc)
3737 pm_runtime_resume(&dwc->xhci->dev);
3738 }
3739 break;
3740
3741 default:
3742 dev_err(mdwc->dev, "%s: invalid otg-state\n", __func__);
3743
3744 }
3745
3746 if (work)
3747 schedule_delayed_work(&mdwc->sm_work, delay);
3748
3749ret:
3750 return;
3751}
3752
3753#ifdef CONFIG_PM_SLEEP
/*
 * dwc3_msm_pm_suspend - system sleep suspend callback (CONFIG_PM_SLEEP)
 * @dev: dwc3-msm platform device
 *
 * Refuses system suspend with -EBUSY unless the controller has already
 * been put into low power mode by runtime PM.  On successful hardware
 * suspend, sets the pm_suspended flag so other paths know system sleep
 * is in progress.
 */
static int dwc3_msm_pm_suspend(struct device *dev)
{
	int ret = 0;
	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);

	dev_dbg(dev, "dwc3-msm PM suspend\n");
	dbg_event(0xFF, "PM Sus", 0);

	/* Drain queued work before sampling in_lpm, so a pending
	 * resume/state-machine item can't race the LPM check below.
	 */
	flush_workqueue(mdwc->dwc3_wq);
	if (!atomic_read(&dwc->in_lpm)) {
		dev_err(mdwc->dev, "Abort PM suspend!! (USB is outside LPM)\n");
		return -EBUSY;
	}

	ret = dwc3_msm_suspend(mdwc);
	if (!ret)
		atomic_set(&mdwc->pm_suspended, 1);

	return ret;
}
3775
/*
 * dwc3_msm_pm_resume - system sleep resume callback (CONFIG_PM_SLEEP)
 * @dev: dwc3-msm platform device
 *
 * Clears the pm_suspended flag and kicks the OTG state machine via
 * resume_work; the actual hardware wakeup is driven from that work and
 * runtime PM rather than done here.  Always returns 0.
 */
static int dwc3_msm_pm_resume(struct device *dev)
{
	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
	/* NOTE(review): 'dwc' appears unused, but dbg_event() is a macro
	 * and may expand to reference it — confirm before removing.
	 */
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);

	dev_dbg(dev, "dwc3-msm PM resume\n");
	dbg_event(0xFF, "PM Res", 0);

	/* flush to avoid race in read/write of pm_suspended */
	flush_workqueue(mdwc->dwc3_wq);
	atomic_set(&mdwc->pm_suspended, 0);

	/* kick in otg state machine */
	queue_work(mdwc->dwc3_wq, &mdwc->resume_work);

	return 0;
}
3793#endif
3794
3795#ifdef CONFIG_PM
3796static int dwc3_msm_runtime_idle(struct device *dev)
3797{
Mayank Rana08e41922017-03-02 15:25:48 -08003798 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3799 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3800
Mayank Rana511f3b22016-08-02 12:00:11 -07003801 dev_dbg(dev, "DWC3-msm runtime idle\n");
Mayank Rana08e41922017-03-02 15:25:48 -08003802 dbg_event(0xFF, "RT Idle", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003803
3804 return 0;
3805}
3806
/*
 * dwc3_msm_runtime_suspend - runtime PM suspend callback
 * @dev: dwc3-msm platform device
 *
 * Delegates the actual controller low-power transition to
 * dwc3_msm_suspend() and returns its result.
 */
static int dwc3_msm_runtime_suspend(struct device *dev)
{
	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
	/* NOTE(review): 'dwc' appears unused, but dbg_event() is a macro
	 * and may expand to reference it — confirm before removing.
	 */
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);

	dev_dbg(dev, "DWC3-msm runtime suspend\n");
	dbg_event(0xFF, "RT Sus", 0);

	return dwc3_msm_suspend(mdwc);
}
3817
/*
 * dwc3_msm_runtime_resume - runtime PM resume callback
 * @dev: dwc3-msm platform device
 *
 * Delegates the controller wakeup to dwc3_msm_resume() and returns
 * its result.
 */
static int dwc3_msm_runtime_resume(struct device *dev)
{
	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
	/* NOTE(review): 'dwc' appears unused, but dbg_event() is a macro
	 * and may expand to reference it — confirm before removing.
	 */
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);

	dev_dbg(dev, "DWC3-msm runtime resume\n");
	dbg_event(0xFF, "RT Res", 0);

	return dwc3_msm_resume(mdwc);
}
3828#endif
3829
/* System-sleep and runtime PM callbacks for the msm-dwc3 platform driver. */
static const struct dev_pm_ops dwc3_msm_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dwc3_msm_pm_suspend, dwc3_msm_pm_resume)
	SET_RUNTIME_PM_OPS(dwc3_msm_runtime_suspend, dwc3_msm_runtime_resume,
				dwc3_msm_runtime_idle)
};
3835
/*
 * Device tree match table.
 * NOTE(review): identifier is misspelled ("matach" -> "match"); it is
 * referenced by MODULE_DEVICE_TABLE below and by dwc3_msm_driver, so
 * any rename must update all three sites together.
 */
static const struct of_device_id of_dwc3_matach[] = {
	{
		.compatible = "qcom,dwc-usb3-msm",
	},
	{ },
};
MODULE_DEVICE_TABLE(of, of_dwc3_matach);
3843
/* Platform driver glue; probe/remove are defined earlier in this file. */
static struct platform_driver dwc3_msm_driver = {
	.probe = dwc3_msm_probe,
	.remove = dwc3_msm_remove,
	.driver = {
		.name = "msm-dwc3",
		.pm = &dwc3_msm_dev_pm_ops,
		.of_match_table = of_dwc3_matach,
	},
};
3853
/* Module metadata exposed via modinfo. */
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DesignWare USB3 MSM Glue Layer");
3856
3857static int dwc3_msm_init(void)
3858{
3859 return platform_driver_register(&dwc3_msm_driver);
3860}
3861module_init(dwc3_msm_init);
3862
/* Module exit point: unregister the platform driver. */
static void __exit dwc3_msm_exit(void)
{
	platform_driver_unregister(&dwc3_msm_driver);
}
module_exit(dwc3_msm_exit);