Hemant Kumar8e4c2f22017-01-24 18:13:07 -08001/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Mayank Rana511f3b22016-08-02 12:00:11 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/slab.h>
17#include <linux/cpu.h>
18#include <linux/platform_device.h>
19#include <linux/dma-mapping.h>
20#include <linux/dmapool.h>
21#include <linux/pm_runtime.h>
22#include <linux/ratelimit.h>
23#include <linux/interrupt.h>
Jack Phambbe27962017-03-23 18:42:26 -070024#include <asm/dma-iommu.h>
25#include <linux/iommu.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070026#include <linux/ioport.h>
27#include <linux/clk.h>
28#include <linux/io.h>
29#include <linux/module.h>
30#include <linux/types.h>
31#include <linux/delay.h>
32#include <linux/of.h>
33#include <linux/of_platform.h>
34#include <linux/of_gpio.h>
35#include <linux/list.h>
36#include <linux/uaccess.h>
37#include <linux/usb/ch9.h>
38#include <linux/usb/gadget.h>
39#include <linux/usb/of.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070040#include <linux/regulator/consumer.h>
41#include <linux/pm_wakeup.h>
42#include <linux/power_supply.h>
43#include <linux/cdev.h>
44#include <linux/completion.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070045#include <linux/msm-bus.h>
46#include <linux/irq.h>
47#include <linux/extcon.h>
Amit Nischal4d278212016-06-06 17:54:34 +053048#include <linux/reset.h>
Hemant Kumar633dc332016-08-10 13:41:05 -070049#include <linux/clk/qcom.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070050
51#include "power.h"
52#include "core.h"
53#include "gadget.h"
54#include "dbm.h"
55#include "debug.h"
56#include "xhci.h"
57
Hemant Kumar006fae42017-07-12 18:11:25 -070058#define SDP_CONNETION_CHECK_TIME 10000 /* in ms */
59
Mayank Rana511f3b22016-08-02 12:00:11 -070060/* timeout to wait for USB cable status notification (in ms) */
61#define SM_INIT_TIMEOUT 30000
62
63/* AHB2PHY register offsets */
64#define PERIPH_SS_AHB2PHY_TOP_CFG 0x10
65
 66/* AHB2PHY read/write wait value */
67#define ONE_READ_WRITE_WAIT 0x11
68
 69/* CPU to which the USB interrupt is affined */
70static int cpu_to_affin;
71module_param(cpu_to_affin, int, S_IRUGO|S_IWUSR);
 72MODULE_PARM_DESC(cpu_to_affin, "affine usb irq to this cpu");
73
Mayank Ranaf70d8212017-06-12 14:02:07 -070074/* override for USB speed */
75static int override_usb_speed;
76module_param(override_usb_speed, int, 0644);
77MODULE_PARM_DESC(override_usb_speed, "override for USB speed");
78
Mayank Rana511f3b22016-08-02 12:00:11 -070079/* XHCI registers */
80#define USB3_HCSPARAMS1 (0x4)
81#define USB3_PORTSC (0x420)
82
83/**
84 * USB QSCRATCH Hardware registers
85 *
86 */
87#define QSCRATCH_REG_OFFSET (0x000F8800)
88#define QSCRATCH_GENERAL_CFG (QSCRATCH_REG_OFFSET + 0x08)
89#define CGCTL_REG (QSCRATCH_REG_OFFSET + 0x28)
90#define PWR_EVNT_IRQ_STAT_REG (QSCRATCH_REG_OFFSET + 0x58)
91#define PWR_EVNT_IRQ_MASK_REG (QSCRATCH_REG_OFFSET + 0x5C)
92
93#define PWR_EVNT_POWERDOWN_IN_P3_MASK BIT(2)
94#define PWR_EVNT_POWERDOWN_OUT_P3_MASK BIT(3)
95#define PWR_EVNT_LPM_IN_L2_MASK BIT(4)
96#define PWR_EVNT_LPM_OUT_L2_MASK BIT(5)
97#define PWR_EVNT_LPM_OUT_L1_MASK BIT(13)
98
99/* QSCRATCH_GENERAL_CFG register bit offset */
100#define PIPE_UTMI_CLK_SEL BIT(0)
101#define PIPE3_PHYSTATUS_SW BIT(3)
102#define PIPE_UTMI_CLK_DIS BIT(8)
103
104#define HS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x10)
105#define UTMI_OTG_VBUS_VALID BIT(20)
106#define SW_SESSVLD_SEL BIT(28)
107
108#define SS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x30)
109#define LANE0_PWR_PRESENT BIT(24)
110
111/* GSI related registers */
112#define GSI_TRB_ADDR_BIT_53_MASK (1 << 21)
113#define GSI_TRB_ADDR_BIT_55_MASK (1 << 23)
114
115#define GSI_GENERAL_CFG_REG (QSCRATCH_REG_OFFSET + 0xFC)
116#define GSI_RESTART_DBL_PNTR_MASK BIT(20)
117#define GSI_CLK_EN_MASK BIT(12)
118#define BLOCK_GSI_WR_GO_MASK BIT(1)
119#define GSI_EN_MASK BIT(0)
120
121#define GSI_DBL_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x110) + (n*4))
122#define GSI_DBL_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x120) + (n*4))
123#define GSI_RING_BASE_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x130) + (n*4))
124#define GSI_RING_BASE_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x140) + (n*4))
125
126#define GSI_IF_STS (QSCRATCH_REG_OFFSET + 0x1A4)
127#define GSI_WR_CTRL_STATE_MASK BIT(15)
128
Mayank Ranaf4918d32016-12-15 13:35:55 -0800129#define DWC3_GEVNTCOUNT_EVNTINTRPTMASK (1 << 31)
130#define DWC3_GEVNTADRHI_EVNTADRHI_GSI_EN(n) (n << 22)
131#define DWC3_GEVNTADRHI_EVNTADRHI_GSI_IDX(n) (n << 16)
132#define DWC3_GEVENT_TYPE_GSI 0x3
133
Mayank Rana511f3b22016-08-02 12:00:11 -0700134struct dwc3_msm_req_complete {
135 struct list_head list_item;
136 struct usb_request *req;
137 void (*orig_complete)(struct usb_ep *ep,
138 struct usb_request *req);
139};
140
141enum dwc3_id_state {
142 DWC3_ID_GROUND = 0,
143 DWC3_ID_FLOAT,
144};
145
146/* for type c cable */
147enum plug_orientation {
148 ORIENTATION_NONE,
149 ORIENTATION_CC1,
150 ORIENTATION_CC2,
151};
152
Mayank Ranad339abe2017-05-31 09:19:49 -0700153enum msm_usb_irq {
154 HS_PHY_IRQ,
155 PWR_EVNT_IRQ,
156 DP_HS_PHY_IRQ,
157 DM_HS_PHY_IRQ,
158 SS_PHY_IRQ,
159 USB_MAX_IRQ
160};
161
162struct usb_irq {
163 char *name;
164 int irq;
165 bool enable;
166};
167
168static const struct usb_irq usb_irq_info[USB_MAX_IRQ] = {
169 {"hs_phy_irq", 0},
170 {"pwr_event_irq", 0},
171 {"dp_hs_phy_irq", 0},
172 {"dm_hs_phy_irq", 0},
173 {"ss_phy_irq", 0},
174};
175
Mayank Rana511f3b22016-08-02 12:00:11 -0700176/* Input bits to state machine (mdwc->inputs) */
177
178#define ID 0
179#define B_SESS_VLD 1
180#define B_SUSPEND 2
181
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +0530182#define PM_QOS_SAMPLE_SEC 2
183#define PM_QOS_THRESHOLD 400
184
Mayank Rana511f3b22016-08-02 12:00:11 -0700185struct dwc3_msm {
186 struct device *dev;
187 void __iomem *base;
188 void __iomem *ahb2phy_base;
189 struct platform_device *dwc3;
Jack Phambbe27962017-03-23 18:42:26 -0700190 struct dma_iommu_mapping *iommu_map;
Mayank Rana511f3b22016-08-02 12:00:11 -0700191 const struct usb_ep_ops *original_ep_ops[DWC3_ENDPOINTS_NUM];
192 struct list_head req_complete_list;
193 struct clk *xo_clk;
194 struct clk *core_clk;
195 long core_clk_rate;
Hemant Kumar8e4c2f22017-01-24 18:13:07 -0800196 long core_clk_rate_hs;
Mayank Rana511f3b22016-08-02 12:00:11 -0700197 struct clk *iface_clk;
198 struct clk *sleep_clk;
199 struct clk *utmi_clk;
200 unsigned int utmi_clk_rate;
201 struct clk *utmi_clk_src;
202 struct clk *bus_aggr_clk;
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +0530203 struct clk *noc_aggr_clk;
Mayank Rana511f3b22016-08-02 12:00:11 -0700204 struct clk *cfg_ahb_clk;
Amit Nischal4d278212016-06-06 17:54:34 +0530205 struct reset_control *core_reset;
Mayank Rana511f3b22016-08-02 12:00:11 -0700206 struct regulator *dwc3_gdsc;
207
208 struct usb_phy *hs_phy, *ss_phy;
209
210 struct dbm *dbm;
211
212 /* VBUS regulator for host mode */
213 struct regulator *vbus_reg;
214 int vbus_retry_count;
215 bool resume_pending;
216 atomic_t pm_suspended;
Mayank Ranad339abe2017-05-31 09:19:49 -0700217 struct usb_irq wakeup_irq[USB_MAX_IRQ];
Mayank Rana511f3b22016-08-02 12:00:11 -0700218 struct work_struct resume_work;
219 struct work_struct restart_usb_work;
220 bool in_restart;
221 struct workqueue_struct *dwc3_wq;
222 struct delayed_work sm_work;
223 unsigned long inputs;
224 unsigned int max_power;
225 bool charging_disabled;
226 enum usb_otg_state otg_state;
Mayank Rana511f3b22016-08-02 12:00:11 -0700227 u32 bus_perf_client;
228 struct msm_bus_scale_pdata *bus_scale_table;
229 struct power_supply *usb_psy;
Jack Pham4b8b4ae2016-08-09 11:36:34 -0700230 struct work_struct vbus_draw_work;
Mayank Rana511f3b22016-08-02 12:00:11 -0700231 bool in_host_mode;
Hemant Kumar8e4c2f22017-01-24 18:13:07 -0800232 enum usb_device_speed max_rh_port_speed;
Mayank Rana511f3b22016-08-02 12:00:11 -0700233 unsigned int tx_fifo_size;
234 bool vbus_active;
235 bool suspend;
236 bool disable_host_mode_pm;
Mayank Ranad339abe2017-05-31 09:19:49 -0700237 bool use_pdc_interrupts;
Mayank Rana511f3b22016-08-02 12:00:11 -0700238 enum dwc3_id_state id_state;
239 unsigned long lpm_flags;
240#define MDWC3_SS_PHY_SUSPEND BIT(0)
241#define MDWC3_ASYNC_IRQ_WAKE_CAPABILITY BIT(1)
242#define MDWC3_POWER_COLLAPSE BIT(2)
243
244 unsigned int irq_to_affin;
245 struct notifier_block dwc3_cpu_notifier;
Manu Gautam976fdfc2016-08-18 09:27:35 +0530246 struct notifier_block usbdev_nb;
247 bool hc_died;
Mayank Rana511f3b22016-08-02 12:00:11 -0700248
249 struct extcon_dev *extcon_vbus;
250 struct extcon_dev *extcon_id;
Mayank Rana51958172017-02-28 14:49:21 -0800251 struct extcon_dev *extcon_eud;
Mayank Rana511f3b22016-08-02 12:00:11 -0700252 struct notifier_block vbus_nb;
253 struct notifier_block id_nb;
Mayank Rana51958172017-02-28 14:49:21 -0800254 struct notifier_block eud_event_nb;
Mayank Rana511f3b22016-08-02 12:00:11 -0700255
Jack Pham4d4e9342016-12-07 19:25:02 -0800256 struct notifier_block host_nb;
257
Mayank Rana511f3b22016-08-02 12:00:11 -0700258 atomic_t in_p3;
259 unsigned int lpm_to_suspend_delay;
260 bool init;
261 enum plug_orientation typec_orientation;
Mayank Ranaf4918d32016-12-15 13:35:55 -0800262 u32 num_gsi_event_buffers;
263 struct dwc3_event_buffer **gsi_ev_buff;
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +0530264 int pm_qos_latency;
265 struct pm_qos_request pm_qos_req_dma;
266 struct delayed_work perf_vote_work;
Hemant Kumar006fae42017-07-12 18:11:25 -0700267 struct delayed_work sdp_check;
Mayank Rana511f3b22016-08-02 12:00:11 -0700268};
269
270#define USB_HSPHY_3P3_VOL_MIN 3050000 /* uV */
271#define USB_HSPHY_3P3_VOL_MAX 3300000 /* uV */
272#define USB_HSPHY_3P3_HPM_LOAD 16000 /* uA */
273
274#define USB_HSPHY_1P8_VOL_MIN 1800000 /* uV */
275#define USB_HSPHY_1P8_VOL_MAX 1800000 /* uV */
276#define USB_HSPHY_1P8_HPM_LOAD 19000 /* uA */
277
278#define USB_SSPHY_1P8_VOL_MIN 1800000 /* uV */
279#define USB_SSPHY_1P8_VOL_MAX 1800000 /* uV */
280#define USB_SSPHY_1P8_HPM_LOAD 23000 /* uA */
281
282#define DSTS_CONNECTSPD_SS 0x4
283
284
285static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc);
286static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA);
Mayank Ranaf4918d32016-12-15 13:35:55 -0800287static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event);
Mayank Ranaf70d8212017-06-12 14:02:07 -0700288
289static inline bool is_valid_usb_speed(struct dwc3 *dwc, int speed)
290{
291
292 return (((speed == USB_SPEED_FULL) || (speed == USB_SPEED_HIGH) ||
293 (speed == USB_SPEED_SUPER) || (speed == USB_SPEED_SUPER_PLUS))
294 && (speed <= dwc->maximum_speed));
295}
296
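/*
 * Illustrative use of the override_usb_speed parameter (a sketch, not part
 * of the driver flow): the value is an enum usb_device_speed number and is
 * only honoured when is_valid_usb_speed() accepts it, i.e. it must not
 * exceed dwc->maximum_speed. Assuming the module is named dwc3-msm, a
 * high-speed override could be requested from user space with:
 *
 *	echo 3 > /sys/module/dwc3_msm/parameters/override_usb_speed
 *
 * where 3 corresponds to USB_SPEED_HIGH in enum usb_device_speed.
 */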
Mayank Rana511f3b22016-08-02 12:00:11 -0700297/**
298 *
299 * Read register with debug info.
300 *
301 * @base - DWC3 base virtual address.
302 * @offset - register offset.
303 *
304 * @return u32
305 */
Stephen Boyda247bae2017-06-15 14:09:08 -0700306static inline u32 dwc3_msm_read_reg(void __iomem *base, u32 offset)
Mayank Rana511f3b22016-08-02 12:00:11 -0700307{
308 u32 val = ioread32(base + offset);
309 return val;
310}
311
312/**
313 * Read register masked field with debug info.
314 *
315 * @base - DWC3 base virtual address.
316 * @offset - register offset.
317 * @mask - register bitmask.
318 *
319 * @return u32
320 */
Stephen Boyda247bae2017-06-15 14:09:08 -0700321static inline u32 dwc3_msm_read_reg_field(void __iomem *base,
Mayank Rana511f3b22016-08-02 12:00:11 -0700322 u32 offset,
323 const u32 mask)
324{
Mayank Ranad796cab2017-07-11 15:34:12 -0700325 u32 shift = __ffs(mask);
Mayank Rana511f3b22016-08-02 12:00:11 -0700326 u32 val = ioread32(base + offset);
327
328 val &= mask; /* clear other bits */
329 val >>= shift;
330 return val;
331}
332
333/**
334 *
335 * Write register with debug info.
336 *
337 * @base - DWC3 base virtual address.
338 * @offset - register offset.
339 * @val - value to write.
340 *
341 */
Stephen Boyda247bae2017-06-15 14:09:08 -0700342static inline void dwc3_msm_write_reg(void __iomem *base, u32 offset, u32 val)
Mayank Rana511f3b22016-08-02 12:00:11 -0700343{
344 iowrite32(val, base + offset);
345}
346
347/**
348 * Write register masked field with debug info.
349 *
350 * @base - DWC3 base virtual address.
351 * @offset - register offset.
352 * @mask - register bitmask.
353 * @val - value to write.
354 *
355 */
Stephen Boyda247bae2017-06-15 14:09:08 -0700356static inline void dwc3_msm_write_reg_field(void __iomem *base, u32 offset,
Mayank Rana511f3b22016-08-02 12:00:11 -0700357 const u32 mask, u32 val)
358{
Mayank Ranad796cab2017-07-11 15:34:12 -0700359 u32 shift = __ffs(mask);
Mayank Rana511f3b22016-08-02 12:00:11 -0700360 u32 tmp = ioread32(base + offset);
361
362 tmp &= ~mask; /* clear written bits */
363 val = tmp | (val << shift);
364 iowrite32(val, base + offset);
365}
366
367/**
368 * Write register and read back masked value to confirm it is written
369 *
370 * @base - DWC3 base virtual address.
371 * @offset - register offset.
372 * @mask - register bitmask specifying what should be updated
373 * @val - value to write.
374 *
375 */
Stephen Boyda247bae2017-06-15 14:09:08 -0700376static inline void dwc3_msm_write_readback(void __iomem *base, u32 offset,
Mayank Rana511f3b22016-08-02 12:00:11 -0700377 const u32 mask, u32 val)
378{
379 u32 write_val, tmp = ioread32(base + offset);
380
381 tmp &= ~mask; /* retain other bits */
382 write_val = tmp | val;
383
384 iowrite32(write_val, base + offset);
385
386 /* Read back to see if val was written */
387 tmp = ioread32(base + offset);
388 tmp &= mask; /* clear other bits */
389
390 if (tmp != val)
391 pr_err("%s: write: %x to QSCRATCH: %x FAILED\n",
392 __func__, val, offset);
393}
394
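/*
 * Illustrative usage of the register helpers above (a sketch with a made-up
 * call site, not code from this driver): assert the LANE0_PWR_PRESENT bit in
 * SS_PHY_CTRL_REG and verify the write took effect, then read the power
 * event status and extract the L2-entry bit.
 *
 *	dwc3_msm_write_readback(mdwc->base, SS_PHY_CTRL_REG,
 *				LANE0_PWR_PRESENT, LANE0_PWR_PRESENT);
 *	if (dwc3_msm_read_reg_field(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
 *				    PWR_EVNT_LPM_IN_L2_MASK))
 *		dev_dbg(mdwc->dev, "controller entered L2\n");
 */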
Hemant Kumar8e4c2f22017-01-24 18:13:07 -0800395static bool dwc3_msm_is_ss_rhport_connected(struct dwc3_msm *mdwc)
396{
397 int i, num_ports;
398 u32 reg;
399
400 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
401 num_ports = HCS_MAX_PORTS(reg);
402
403 for (i = 0; i < num_ports; i++) {
404 reg = dwc3_msm_read_reg(mdwc->base, USB3_PORTSC + i*0x10);
405 if ((reg & PORT_CONNECT) && DEV_SUPERSPEED(reg))
406 return true;
407 }
408
409 return false;
410}
411
Mayank Rana511f3b22016-08-02 12:00:11 -0700412static bool dwc3_msm_is_host_superspeed(struct dwc3_msm *mdwc)
413{
414 int i, num_ports;
415 u32 reg;
416
417 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
418 num_ports = HCS_MAX_PORTS(reg);
419
420 for (i = 0; i < num_ports; i++) {
421 reg = dwc3_msm_read_reg(mdwc->base, USB3_PORTSC + i*0x10);
422 if ((reg & PORT_PE) && DEV_SUPERSPEED(reg))
423 return true;
424 }
425
426 return false;
427}
428
429static inline bool dwc3_msm_is_dev_superspeed(struct dwc3_msm *mdwc)
430{
431 u8 speed;
432
433 speed = dwc3_msm_read_reg(mdwc->base, DWC3_DSTS) & DWC3_DSTS_CONNECTSPD;
434 return !!(speed & DSTS_CONNECTSPD_SS);
435}
436
437static inline bool dwc3_msm_is_superspeed(struct dwc3_msm *mdwc)
438{
439 if (mdwc->in_host_mode)
440 return dwc3_msm_is_host_superspeed(mdwc);
441
442 return dwc3_msm_is_dev_superspeed(mdwc);
443}
444
445#if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
446/**
447 * Configure the DBM with the BAM's data fifo.
448 * This function is called by the USB BAM Driver
449 * upon initialization.
450 *
451 * @ep - pointer to usb endpoint.
452 * @addr - address of data fifo.
453 * @size - size of data fifo.
 454 * @dst_pipe_idx - destination pipe index.
455 */
456int msm_data_fifo_config(struct usb_ep *ep, phys_addr_t addr,
457 u32 size, u8 dst_pipe_idx)
458{
459 struct dwc3_ep *dep = to_dwc3_ep(ep);
460 struct dwc3 *dwc = dep->dwc;
461 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
462
463 dev_dbg(mdwc->dev, "%s\n", __func__);
464
465 return dbm_data_fifo_config(mdwc->dbm, dep->number, addr, size,
466 dst_pipe_idx);
467}
468
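/*
 * Illustrative caller of msm_data_fifo_config() (a sketch with hypothetical
 * values; in practice the USB BAM driver supplies the FIFO address, size and
 * destination pipe index it obtained while setting up the BAM pipe):
 *
 *	ret = msm_data_fifo_config(func_ep, bam_fifo_phys, bam_fifo_size,
 *				   dst_pipe_idx);
 *	if (ret)
 *		pr_err("DBM data fifo config failed: %d\n", ret);
 */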
469
470/**
 471* Clean up MSM endpoint state when a request completes.
 472*
 473* Also calls the original request completion callback.
 474*
 475* @usb_ep - pointer to usb_ep instance.
 476* @request - pointer to usb_request instance.
479*/
480static void dwc3_msm_req_complete_func(struct usb_ep *ep,
481 struct usb_request *request)
482{
483 struct dwc3_ep *dep = to_dwc3_ep(ep);
484 struct dwc3 *dwc = dep->dwc;
485 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
486 struct dwc3_msm_req_complete *req_complete = NULL;
487
488 /* Find original request complete function and remove it from list */
489 list_for_each_entry(req_complete, &mdwc->req_complete_list, list_item) {
490 if (req_complete->req == request)
491 break;
492 }
493 if (!req_complete || req_complete->req != request) {
494 dev_err(dep->dwc->dev, "%s: could not find the request\n",
495 __func__);
496 return;
497 }
498 list_del(&req_complete->list_item);
499
500 /*
 501 * Release one more TRB to the pool since the DBM queue took 2 TRBs
502 * (normal and link), and the dwc3/gadget.c :: dwc3_gadget_giveback
503 * released only one.
504 */
Mayank Rana83ad5822016-08-09 14:17:22 -0700505 dep->trb_dequeue++;
Mayank Rana511f3b22016-08-02 12:00:11 -0700506
507 /* Unconfigure dbm ep */
508 dbm_ep_unconfig(mdwc->dbm, dep->number);
509
510 /*
 511 * If this is the last endpoint we unconfigured, then also reset
 512 * the event buffers; unless the ep is being unconfigured due to lpm,
513 * in which case the event buffer only gets reset during the
514 * block reset.
515 */
516 if (dbm_get_num_of_eps_configured(mdwc->dbm) == 0 &&
517 !dbm_reset_ep_after_lpm(mdwc->dbm))
518 dbm_event_buffer_config(mdwc->dbm, 0, 0, 0);
519
520 /*
521 * Call original complete function, notice that dwc->lock is already
522 * taken by the caller of this function (dwc3_gadget_giveback()).
523 */
524 request->complete = req_complete->orig_complete;
525 if (request->complete)
526 request->complete(ep, request);
527
528 kfree(req_complete);
529}
530
531
532/**
533* Helper function
534*
535* Reset DBM endpoint.
536*
537* @mdwc - pointer to dwc3_msm instance.
538* @dep - pointer to dwc3_ep instance.
539*
540* @return int - 0 on success, negative on error.
541*/
542static int __dwc3_msm_dbm_ep_reset(struct dwc3_msm *mdwc, struct dwc3_ep *dep)
543{
544 int ret;
545
546 dev_dbg(mdwc->dev, "Resetting dbm endpoint %d\n", dep->number);
547
548 /* Reset the dbm endpoint */
549 ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, true);
550 if (ret) {
551 dev_err(mdwc->dev, "%s: failed to assert dbm ep reset\n",
552 __func__);
553 return ret;
554 }
555
556 /*
557 * The necessary delay between asserting and deasserting the dbm ep
558 * reset is based on the number of active endpoints. If there is more
559 * than one endpoint, a 1 msec delay is required. Otherwise, a shorter
560 * delay will suffice.
561 */
562 if (dbm_get_num_of_eps_configured(mdwc->dbm) > 1)
563 usleep_range(1000, 1200);
564 else
565 udelay(10);
566 ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, false);
567 if (ret) {
568 dev_err(mdwc->dev, "%s: failed to deassert dbm ep reset\n",
569 __func__);
570 return ret;
571 }
572
573 return 0;
574}
575
576/**
577* Reset the DBM endpoint which is linked to the given USB endpoint.
578*
579* @usb_ep - pointer to usb_ep instance.
580*
581* @return int - 0 on success, negative on error.
582*/
583
584int msm_dwc3_reset_dbm_ep(struct usb_ep *ep)
585{
586 struct dwc3_ep *dep = to_dwc3_ep(ep);
587 struct dwc3 *dwc = dep->dwc;
588 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
589
590 return __dwc3_msm_dbm_ep_reset(mdwc, dep);
591}
592EXPORT_SYMBOL(msm_dwc3_reset_dbm_ep);
593
594
595/**
596* Helper function.
597* See the header of the dwc3_msm_ep_queue function.
598*
599* @dwc3_ep - pointer to dwc3_ep instance.
600* @req - pointer to dwc3_request instance.
601*
602* @return int - 0 on success, negative on error.
603*/
604static int __dwc3_msm_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
605{
606 struct dwc3_trb *trb;
607 struct dwc3_trb *trb_link;
608 struct dwc3_gadget_ep_cmd_params params;
609 u32 cmd;
610 int ret = 0;
611
Mayank Rana83ad5822016-08-09 14:17:22 -0700612	/* We push the request to the dep->started_list to indicate that
Mayank Rana511f3b22016-08-02 12:00:11 -0700613	 * this request is issued with start transfer. The request will be
 614	 * removed from this list in 2 cases. The first is that the transfer
 615	 * completes (which does not happen for an endless transfer that uses a
 616	 * circular TRB ring with a link TRB). The second case is a stop transfer,
 617	 * which can be initiated by the function driver when calling dequeue.
618 */
Mayank Rana83ad5822016-08-09 14:17:22 -0700619 req->started = true;
620 list_add_tail(&req->list, &dep->started_list);
Mayank Rana511f3b22016-08-02 12:00:11 -0700621
622 /* First, prepare a normal TRB, point to the fake buffer */
Mayank Rana9ca186c2017-06-19 17:57:21 -0700623 trb = &dep->trb_pool[dep->trb_enqueue];
624 dwc3_ep_inc_enq(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700625 memset(trb, 0, sizeof(*trb));
626
627 req->trb = trb;
628 trb->bph = DBM_TRB_BIT | DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
629 trb->size = DWC3_TRB_SIZE_LENGTH(req->request.length);
630 trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_HWO |
631 DWC3_TRB_CTRL_CHN | (req->direction ? 0 : DWC3_TRB_CTRL_CSP);
632 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
633
634 /* Second, prepare a Link TRB that points to the first TRB*/
Mayank Rana9ca186c2017-06-19 17:57:21 -0700635 trb_link = &dep->trb_pool[dep->trb_enqueue];
636 dwc3_ep_inc_enq(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700637 memset(trb_link, 0, sizeof(*trb_link));
638
639 trb_link->bpl = lower_32_bits(req->trb_dma);
640 trb_link->bph = DBM_TRB_BIT |
641 DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
642 trb_link->size = 0;
643 trb_link->ctrl = DWC3_TRBCTL_LINK_TRB | DWC3_TRB_CTRL_HWO;
644
645 /*
646 * Now start the transfer
647 */
648 memset(&params, 0, sizeof(params));
649 params.param0 = 0; /* TDAddr High */
650 params.param1 = lower_32_bits(req->trb_dma); /* DAddr Low */
651
652 /* DBM requires IOC to be set */
653 cmd = DWC3_DEPCMD_STARTTRANSFER | DWC3_DEPCMD_CMDIOC;
Mayank Rana83ad5822016-08-09 14:17:22 -0700654 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -0700655 if (ret < 0) {
656 dev_dbg(dep->dwc->dev,
657 "%s: failed to send STARTTRANSFER command\n",
658 __func__);
659
660 list_del(&req->list);
661 return ret;
662 }
663 dep->flags |= DWC3_EP_BUSY;
Mayank Rana83ad5822016-08-09 14:17:22 -0700664 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700665
666 return ret;
667}
668
669/**
670* Queue a usb request to the DBM endpoint.
671* This function should be called after the endpoint
 672* was enabled by ep_enable.
 673*
 674* This function prepares a special structure of TRBs which
 675* is understood by the DBM HW, so it is possible to use
 676* this endpoint in DBM mode.
 677*
 678* The TRBs prepared by this function are one normal TRB
 679* which points to a fake buffer, followed by a link TRB
 680* that points back to the first TRB.
 681*
 682* The API of this function follows the regular API of
 683* usb_ep_queue (see usb_ep_ops in include/linux/usb/gadget.h).
684*
685* @usb_ep - pointer to usb_ep instance.
686* @request - pointer to usb_request instance.
687* @gfp_flags - possible flags.
688*
689* @return int - 0 on success, negative on error.
690*/
691static int dwc3_msm_ep_queue(struct usb_ep *ep,
692 struct usb_request *request, gfp_t gfp_flags)
693{
694 struct dwc3_request *req = to_dwc3_request(request);
695 struct dwc3_ep *dep = to_dwc3_ep(ep);
696 struct dwc3 *dwc = dep->dwc;
697 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
698 struct dwc3_msm_req_complete *req_complete;
699 unsigned long flags;
700 int ret = 0, size;
701 u8 bam_pipe;
702 bool producer;
703 bool disable_wb;
704 bool internal_mem;
705 bool ioc;
706 bool superspeed;
707
708 if (!(request->udc_priv & MSM_SPS_MODE)) {
709 /* Not SPS mode, call original queue */
710 dev_vdbg(mdwc->dev, "%s: not sps mode, use regular queue\n",
711 __func__);
712
713 return (mdwc->original_ep_ops[dep->number])->queue(ep,
714 request,
715 gfp_flags);
716 }
717
718 /* HW restriction regarding TRB size (8KB) */
719 if (req->request.length < 0x2000) {
720 dev_err(mdwc->dev, "%s: Min TRB size is 8KB\n", __func__);
721 return -EINVAL;
722 }
723
724 /*
725 * Override req->complete function, but before doing that,
 726	 * store its original pointer in the req_complete_list.
727 */
728 req_complete = kzalloc(sizeof(*req_complete), gfp_flags);
729 if (!req_complete)
730 return -ENOMEM;
731
732 req_complete->req = request;
733 req_complete->orig_complete = request->complete;
734 list_add_tail(&req_complete->list_item, &mdwc->req_complete_list);
735 request->complete = dwc3_msm_req_complete_func;
736
737 /*
738 * Configure the DBM endpoint
739 */
740 bam_pipe = request->udc_priv & MSM_PIPE_ID_MASK;
741 producer = ((request->udc_priv & MSM_PRODUCER) ? true : false);
742 disable_wb = ((request->udc_priv & MSM_DISABLE_WB) ? true : false);
743 internal_mem = ((request->udc_priv & MSM_INTERNAL_MEM) ? true : false);
744 ioc = ((request->udc_priv & MSM_ETD_IOC) ? true : false);
745
746 ret = dbm_ep_config(mdwc->dbm, dep->number, bam_pipe, producer,
747 disable_wb, internal_mem, ioc);
748 if (ret < 0) {
749 dev_err(mdwc->dev,
750 "error %d after calling dbm_ep_config\n", ret);
751 return ret;
752 }
753
 754	dev_vdbg(dwc->dev, "%s: queuing request %p to ep %s length %d\n",
755 __func__, request, ep->name, request->length);
756 size = dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTSIZ(0));
757 dbm_event_buffer_config(mdwc->dbm,
758 dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRLO(0)),
759 dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRHI(0)),
760 DWC3_GEVNTSIZ_SIZE(size));
761
762 /*
 763	 * We must obtain the lock of the dwc3 core driver, with
 764	 * interrupts disabled, to be sure that we are the only ones
 765	 * configuring the HW device core. We also keep the queuing of
 766	 * the request as short as possible so that the lock is
 767	 * released quickly.
768 */
769 spin_lock_irqsave(&dwc->lock, flags);
770 if (!dep->endpoint.desc) {
771 dev_err(mdwc->dev,
772 "%s: trying to queue request %p to disabled ep %s\n",
773 __func__, request, ep->name);
774 ret = -EPERM;
775 goto err;
776 }
777
778 if (dep->number == 0 || dep->number == 1) {
779 dev_err(mdwc->dev,
780 "%s: trying to queue dbm request %p to control ep %s\n",
781 __func__, request, ep->name);
782 ret = -EPERM;
783 goto err;
784 }
785
786
Mayank Rana83ad5822016-08-09 14:17:22 -0700787 if (dep->trb_dequeue != dep->trb_enqueue ||
788 !list_empty(&dep->pending_list)
789 || !list_empty(&dep->started_list)) {
Mayank Rana511f3b22016-08-02 12:00:11 -0700790 dev_err(mdwc->dev,
791 "%s: trying to queue dbm request %p tp ep %s\n",
792 __func__, request, ep->name);
793 ret = -EPERM;
794 goto err;
795 } else {
Mayank Rana83ad5822016-08-09 14:17:22 -0700796 dep->trb_dequeue = 0;
797 dep->trb_enqueue = 0;
Mayank Rana511f3b22016-08-02 12:00:11 -0700798 }
799
800 ret = __dwc3_msm_ep_queue(dep, req);
801 if (ret < 0) {
802 dev_err(mdwc->dev,
803 "error %d after calling __dwc3_msm_ep_queue\n", ret);
804 goto err;
805 }
806
807 spin_unlock_irqrestore(&dwc->lock, flags);
808 superspeed = dwc3_msm_is_dev_superspeed(mdwc);
809 dbm_set_speed(mdwc->dbm, (u8)superspeed);
810
811 return 0;
812
813err:
814 spin_unlock_irqrestore(&dwc->lock, flags);
815 kfree(req_complete);
816 return ret;
817}
818
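/*
 * Illustrative flow for queuing a DBM/SPS-mode request (a sketch; the exact
 * udc_priv encoding is owned by the function driver and by the MSM_* flags
 * referenced above): the endpoint is first handed to this glue layer with
 * msm_ep_config(), then a request of at least 8KB is queued with the SPS
 * mode flag and BAM pipe id set in udc_priv, which routes it through
 * dwc3_msm_ep_queue() instead of the regular dwc3 queue path.
 *
 *	msm_ep_config(ep);
 *	req->udc_priv = MSM_SPS_MODE | MSM_PRODUCER |
 *			(bam_pipe_id & MSM_PIPE_ID_MASK);
 *	ret = usb_ep_queue(ep, req, GFP_ATOMIC);
 */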
819/*
 820* Returns XferRscIndex for the EP. This is stored by the StartXfer GSI EP OP.
821*
822* @usb_ep - pointer to usb_ep instance.
823*
824* @return int - XferRscIndex
825*/
826static inline int gsi_get_xfer_index(struct usb_ep *ep)
827{
828 struct dwc3_ep *dep = to_dwc3_ep(ep);
829
830 return dep->resource_index;
831}
832
833/*
834* Fills up the GSI channel information needed in call to IPA driver
835* for GSI channel creation.
836*
837* @usb_ep - pointer to usb_ep instance.
838* @ch_info - output parameter with requested channel info
839*/
840static void gsi_get_channel_info(struct usb_ep *ep,
841 struct gsi_channel_info *ch_info)
842{
843 struct dwc3_ep *dep = to_dwc3_ep(ep);
844 int last_trb_index = 0;
845 struct dwc3 *dwc = dep->dwc;
846 struct usb_gsi_request *request = ch_info->ch_req;
847
848 /* Provide physical USB addresses for DEPCMD and GEVENTCNT registers */
849 ch_info->depcmd_low_addr = (u32)(dwc->reg_phys +
Mayank Ranaac776d12017-04-18 16:56:13 -0700850 DWC3_DEP_BASE(dep->number) + DWC3_DEPCMD);
851
Mayank Rana511f3b22016-08-02 12:00:11 -0700852 ch_info->depcmd_hi_addr = 0;
853
854 ch_info->xfer_ring_base_addr = dwc3_trb_dma_offset(dep,
855 &dep->trb_pool[0]);
 856	/* Convert to multiples of 1KB */
857 ch_info->const_buffer_size = request->buf_len/1024;
858
859 /* IN direction */
860 if (dep->direction) {
861 /*
862 * Multiply by size of each TRB for xfer_ring_len in bytes.
863 * 2n + 2 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
864 * extra Xfer TRB followed by n ZLP TRBs + 1 LINK TRB.
865 */
866 ch_info->xfer_ring_len = (2 * request->num_bufs + 2) * 0x10;
867 last_trb_index = 2 * request->num_bufs + 2;
868 } else { /* OUT direction */
869 /*
870 * Multiply by size of each TRB for xfer_ring_len in bytes.
 871		 * n + 2 TRBs as per GSI h/w requirement. n Xfer TRBs + 2
 872		 * LINK TRBs (one at the start and one at the end of the ring).
873 */
Mayank Rana64d136b2016-11-01 21:01:34 -0700874 ch_info->xfer_ring_len = (request->num_bufs + 2) * 0x10;
875 last_trb_index = request->num_bufs + 2;
Mayank Rana511f3b22016-08-02 12:00:11 -0700876 }
877
878 /* Store last 16 bits of LINK TRB address as per GSI hw requirement */
879 ch_info->last_trb_addr = (dwc3_trb_dma_offset(dep,
880 &dep->trb_pool[last_trb_index - 1]) & 0x0000FFFF);
881 ch_info->gevntcount_low_addr = (u32)(dwc->reg_phys +
882 DWC3_GEVNTCOUNT(ep->ep_intr_num));
883 ch_info->gevntcount_hi_addr = 0;
884
885 dev_dbg(dwc->dev,
886 "depcmd_laddr=%x last_trb_addr=%x gevtcnt_laddr=%x gevtcnt_haddr=%x",
887 ch_info->depcmd_low_addr, ch_info->last_trb_addr,
888 ch_info->gevntcount_low_addr, ch_info->gevntcount_hi_addr);
889}
890
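/*
 * Worked example of the ring-size math above (illustrative numbers only):
 * with request->num_bufs = 8 and 16-byte TRBs, an IN endpoint ring holds
 * 2 * 8 + 2 = 18 TRBs (288 bytes) and an OUT endpoint ring holds
 * 8 + 2 = 10 TRBs (160 bytes); last_trb_addr then refers to TRB index 17 or
 * 9 respectively, i.e. the terminating LINK TRB.
 */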
891/*
892* Perform StartXfer on GSI EP. Stores XferRscIndex.
893*
894* @usb_ep - pointer to usb_ep instance.
895*
896* @return int - 0 on success
897*/
898static int gsi_startxfer_for_ep(struct usb_ep *ep)
899{
900 int ret;
901 struct dwc3_gadget_ep_cmd_params params;
902 u32 cmd;
903 struct dwc3_ep *dep = to_dwc3_ep(ep);
904 struct dwc3 *dwc = dep->dwc;
905
906 memset(&params, 0, sizeof(params));
907 params.param0 = GSI_TRB_ADDR_BIT_53_MASK | GSI_TRB_ADDR_BIT_55_MASK;
908 params.param0 |= (ep->ep_intr_num << 16);
909 params.param1 = lower_32_bits(dwc3_trb_dma_offset(dep,
910 &dep->trb_pool[0]));
911 cmd = DWC3_DEPCMD_STARTTRANSFER;
912 cmd |= DWC3_DEPCMD_PARAM(0);
Mayank Rana83ad5822016-08-09 14:17:22 -0700913 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -0700914
915 if (ret < 0)
916 dev_dbg(dwc->dev, "Fail StrtXfr on GSI EP#%d\n", dep->number);
Mayank Rana83ad5822016-08-09 14:17:22 -0700917 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700918 dev_dbg(dwc->dev, "XferRsc = %x", dep->resource_index);
919 return ret;
920}
921
922/*
923* Store Ring Base and Doorbell Address for GSI EP
924* for GSI channel creation.
925*
926* @usb_ep - pointer to usb_ep instance.
927* @dbl_addr - Doorbell address obtained from IPA driver
928*/
929static void gsi_store_ringbase_dbl_info(struct usb_ep *ep, u32 dbl_addr)
930{
931 struct dwc3_ep *dep = to_dwc3_ep(ep);
932 struct dwc3 *dwc = dep->dwc;
933 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
934 int n = ep->ep_intr_num - 1;
935
936 dwc3_msm_write_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n),
937 dwc3_trb_dma_offset(dep, &dep->trb_pool[0]));
938 dwc3_msm_write_reg(mdwc->base, GSI_DBL_ADDR_L(n), dbl_addr);
939
940 dev_dbg(mdwc->dev, "Ring Base Addr %d = %x", n,
941 dwc3_msm_read_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n)));
942 dev_dbg(mdwc->dev, "GSI DB Addr %d = %x", n,
943 dwc3_msm_read_reg(mdwc->base, GSI_DBL_ADDR_L(n)));
944}
945
946/*
Mayank Rana64d136b2016-11-01 21:01:34 -0700947* Rings Doorbell for GSI Channel
Mayank Rana511f3b22016-08-02 12:00:11 -0700948*
949* @usb_ep - pointer to usb_ep instance.
950* @request - pointer to GSI request. This is used to pass in the
951* address of the GSI doorbell obtained from IPA driver
952*/
Mayank Rana64d136b2016-11-01 21:01:34 -0700953static void gsi_ring_db(struct usb_ep *ep, struct usb_gsi_request *request)
Mayank Rana511f3b22016-08-02 12:00:11 -0700954{
955 void __iomem *gsi_dbl_address_lsb;
956 void __iomem *gsi_dbl_address_msb;
957 dma_addr_t offset;
958 u64 dbl_addr = *((u64 *)request->buf_base_addr);
959 u32 dbl_lo_addr = (dbl_addr & 0xFFFFFFFF);
960 u32 dbl_hi_addr = (dbl_addr >> 32);
Mayank Rana511f3b22016-08-02 12:00:11 -0700961 struct dwc3_ep *dep = to_dwc3_ep(ep);
962 struct dwc3 *dwc = dep->dwc;
963 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
Mayank Rana64d136b2016-11-01 21:01:34 -0700964 int num_trbs = (dep->direction) ? (2 * (request->num_bufs) + 2)
965 : (request->num_bufs + 2);
Mayank Rana511f3b22016-08-02 12:00:11 -0700966
967 gsi_dbl_address_lsb = devm_ioremap_nocache(mdwc->dev,
968 dbl_lo_addr, sizeof(u32));
969 if (!gsi_dbl_address_lsb)
970 dev_dbg(mdwc->dev, "Failed to get GSI DBL address LSB\n");
971
972 gsi_dbl_address_msb = devm_ioremap_nocache(mdwc->dev,
973 dbl_hi_addr, sizeof(u32));
974 if (!gsi_dbl_address_msb)
975 dev_dbg(mdwc->dev, "Failed to get GSI DBL address MSB\n");
976
977 offset = dwc3_trb_dma_offset(dep, &dep->trb_pool[num_trbs-1]);
Mayank Rana64d136b2016-11-01 21:01:34 -0700978 dev_dbg(mdwc->dev, "Writing link TRB addr: %pa to %p (%x) for ep:%s\n",
979 &offset, gsi_dbl_address_lsb, dbl_lo_addr, ep->name);
Mayank Rana511f3b22016-08-02 12:00:11 -0700980
981 writel_relaxed(offset, gsi_dbl_address_lsb);
982 writel_relaxed(0, gsi_dbl_address_msb);
983}
984
985/*
986* Sets HWO bit for TRBs and performs UpdateXfer for OUT EP.
987*
988* @usb_ep - pointer to usb_ep instance.
989* @request - pointer to GSI request. Used to determine num of TRBs for OUT EP.
990*
991* @return int - 0 on success
992*/
993static int gsi_updatexfer_for_ep(struct usb_ep *ep,
994 struct usb_gsi_request *request)
995{
996 int i;
997 int ret;
998 u32 cmd;
999 int num_trbs = request->num_bufs + 1;
1000 struct dwc3_trb *trb;
1001 struct dwc3_gadget_ep_cmd_params params;
1002 struct dwc3_ep *dep = to_dwc3_ep(ep);
1003 struct dwc3 *dwc = dep->dwc;
1004
1005 for (i = 0; i < num_trbs - 1; i++) {
1006 trb = &dep->trb_pool[i];
1007 trb->ctrl |= DWC3_TRB_CTRL_HWO;
1008 }
1009
1010 memset(&params, 0, sizeof(params));
1011 cmd = DWC3_DEPCMD_UPDATETRANSFER;
1012 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
Mayank Rana83ad5822016-08-09 14:17:22 -07001013 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -07001014 dep->flags |= DWC3_EP_BUSY;
1015 if (ret < 0)
1016 dev_dbg(dwc->dev, "UpdateXfr fail on GSI EP#%d\n", dep->number);
1017 return ret;
1018}
1019
1020/*
1021* Perform EndXfer on particular GSI EP.
1022*
1023* @usb_ep - pointer to usb_ep instance.
1024*/
1025static void gsi_endxfer_for_ep(struct usb_ep *ep)
1026{
1027 struct dwc3_ep *dep = to_dwc3_ep(ep);
1028 struct dwc3 *dwc = dep->dwc;
1029
1030 dwc3_stop_active_transfer(dwc, dep->number, true);
1031}
1032
1033/*
1034* Allocates and configures TRBs for GSI EPs.
1035*
1036* @usb_ep - pointer to usb_ep instance.
1037* @request - pointer to GSI request.
1038*
1039* @return int - 0 on success
1040*/
1041static int gsi_prepare_trbs(struct usb_ep *ep, struct usb_gsi_request *req)
1042{
1043 int i = 0;
1044 dma_addr_t buffer_addr = req->dma;
1045 struct dwc3_ep *dep = to_dwc3_ep(ep);
1046 struct dwc3 *dwc = dep->dwc;
1047 struct dwc3_trb *trb;
1048 int num_trbs = (dep->direction) ? (2 * (req->num_bufs) + 2)
Mayank Rana64d136b2016-11-01 21:01:34 -07001049 : (req->num_bufs + 2);
Mayank Rana511f3b22016-08-02 12:00:11 -07001050
Jack Phambbe27962017-03-23 18:42:26 -07001051 dep->trb_dma_pool = dma_pool_create(ep->name, dwc->sysdev,
Mayank Rana511f3b22016-08-02 12:00:11 -07001052 num_trbs * sizeof(struct dwc3_trb),
1053 num_trbs * sizeof(struct dwc3_trb), 0);
1054 if (!dep->trb_dma_pool) {
1055 dev_err(dep->dwc->dev, "failed to alloc trb dma pool for %s\n",
1056 dep->name);
1057 return -ENOMEM;
1058 }
1059
1060 dep->num_trbs = num_trbs;
1061
1062 dep->trb_pool = dma_pool_alloc(dep->trb_dma_pool,
1063 GFP_KERNEL, &dep->trb_pool_dma);
1064 if (!dep->trb_pool) {
1065 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
1066 dep->name);
1067 return -ENOMEM;
1068 }
1069
1070 /* IN direction */
1071 if (dep->direction) {
1072 for (i = 0; i < num_trbs ; i++) {
1073 trb = &dep->trb_pool[i];
1074 memset(trb, 0, sizeof(*trb));
1075 /* Set up first n+1 TRBs for ZLPs */
1076 if (i < (req->num_bufs + 1)) {
1077 trb->bpl = 0;
1078 trb->bph = 0;
1079 trb->size = 0;
1080 trb->ctrl = DWC3_TRBCTL_NORMAL
1081 | DWC3_TRB_CTRL_IOC;
1082 continue;
1083 }
1084
1085 /* Setup n TRBs pointing to valid buffers */
1086 trb->bpl = lower_32_bits(buffer_addr);
1087 trb->bph = 0;
1088 trb->size = 0;
1089 trb->ctrl = DWC3_TRBCTL_NORMAL
1090 | DWC3_TRB_CTRL_IOC;
1091 buffer_addr += req->buf_len;
1092
1093 /* Set up the Link TRB at the end */
1094 if (i == (num_trbs - 1)) {
1095 trb->bpl = dwc3_trb_dma_offset(dep,
1096 &dep->trb_pool[0]);
1097 trb->bph = (1 << 23) | (1 << 21)
1098 | (ep->ep_intr_num << 16);
1099 trb->size = 0;
1100 trb->ctrl = DWC3_TRBCTL_LINK_TRB
1101 | DWC3_TRB_CTRL_HWO;
1102 }
1103 }
1104 } else { /* OUT direction */
1105
1106 for (i = 0; i < num_trbs ; i++) {
1107
1108 trb = &dep->trb_pool[i];
1109 memset(trb, 0, sizeof(*trb));
Mayank Rana64d136b2016-11-01 21:01:34 -07001110 /* Setup LINK TRB to start with TRB ring */
1111 if (i == 0) {
Mayank Rana511f3b22016-08-02 12:00:11 -07001112 trb->bpl = dwc3_trb_dma_offset(dep,
Mayank Rana64d136b2016-11-01 21:01:34 -07001113 &dep->trb_pool[1]);
1114 trb->ctrl = DWC3_TRBCTL_LINK_TRB;
1115 } else if (i == (num_trbs - 1)) {
1116 /* Set up the Link TRB at the end */
1117 trb->bpl = dwc3_trb_dma_offset(dep,
1118 &dep->trb_pool[0]);
Mayank Rana511f3b22016-08-02 12:00:11 -07001119 trb->bph = (1 << 23) | (1 << 21)
1120 | (ep->ep_intr_num << 16);
Mayank Rana511f3b22016-08-02 12:00:11 -07001121 trb->ctrl = DWC3_TRBCTL_LINK_TRB
1122 | DWC3_TRB_CTRL_HWO;
Mayank Rana64d136b2016-11-01 21:01:34 -07001123 } else {
1124 trb->bpl = lower_32_bits(buffer_addr);
1125 trb->size = req->buf_len;
1126 buffer_addr += req->buf_len;
1127 trb->ctrl = DWC3_TRBCTL_NORMAL
1128 | DWC3_TRB_CTRL_IOC
1129 | DWC3_TRB_CTRL_CSP
1130 | DWC3_TRB_CTRL_ISP_IMI;
Mayank Rana511f3b22016-08-02 12:00:11 -07001131 }
1132 }
1133 }
Mayank Rana64d136b2016-11-01 21:01:34 -07001134
1135 pr_debug("%s: Initialized TRB Ring for %s\n", __func__, dep->name);
1136 trb = &dep->trb_pool[0];
1137 if (trb) {
1138 for (i = 0; i < num_trbs; i++) {
1139 pr_debug("TRB(%d): ADDRESS:%lx bpl:%x bph:%x size:%x ctrl:%x\n",
1140 i, (unsigned long)dwc3_trb_dma_offset(dep,
1141 &dep->trb_pool[i]), trb->bpl, trb->bph,
1142 trb->size, trb->ctrl);
1143 trb++;
1144 }
1145 }
1146
Mayank Rana511f3b22016-08-02 12:00:11 -07001147 return 0;
1148}
1149
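/*
 * Resulting TRB ring layouts from gsi_prepare_trbs() (derived from the code
 * above, shown only as an illustration), for num_bufs = n:
 *
 * IN (device-to-host), 2n + 2 TRBs:
 *	[ZLP 0] ... [ZLP n] [buf 0] ... [buf n-1] [LINK -> TRB 0]
 *
 * OUT (host-to-device), n + 2 TRBs:
 *	[LINK -> TRB 1] [buf 0] ... [buf n-1] [LINK -> TRB 0]
 */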
1150/*
1151* Frees TRBs for GSI EPs.
1152*
1153* @usb_ep - pointer to usb_ep instance.
1154*
1155*/
1156static void gsi_free_trbs(struct usb_ep *ep)
1157{
1158 struct dwc3_ep *dep = to_dwc3_ep(ep);
1159
1160 if (dep->endpoint.ep_type == EP_TYPE_NORMAL)
1161 return;
1162
1163 /* Free TRBs and TRB pool for EP */
1164 if (dep->trb_dma_pool) {
1165 dma_pool_free(dep->trb_dma_pool, dep->trb_pool,
1166 dep->trb_pool_dma);
1167 dma_pool_destroy(dep->trb_dma_pool);
1168 dep->trb_pool = NULL;
1169 dep->trb_pool_dma = 0;
1170 dep->trb_dma_pool = NULL;
1171 }
1172}
1173/*
1174* Configures GSI EPs. For GSI EPs we need to set interrupter numbers.
1175*
1176* @usb_ep - pointer to usb_ep instance.
1177* @request - pointer to GSI request.
1178*/
1179static void gsi_configure_ep(struct usb_ep *ep, struct usb_gsi_request *request)
1180{
1181 struct dwc3_ep *dep = to_dwc3_ep(ep);
1182 struct dwc3 *dwc = dep->dwc;
1183 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1184 struct dwc3_gadget_ep_cmd_params params;
1185 const struct usb_endpoint_descriptor *desc = ep->desc;
1186 const struct usb_ss_ep_comp_descriptor *comp_desc = ep->comp_desc;
Mayank Ranaac1200c2017-04-25 13:48:46 -07001187 u32 reg;
1188 int ret;
Mayank Rana511f3b22016-08-02 12:00:11 -07001189
1190 memset(&params, 0x00, sizeof(params));
1191
1192 /* Configure GSI EP */
1193 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
1194 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
1195
1196 /* Burst size is only needed in SuperSpeed mode */
1197 if (dwc->gadget.speed == USB_SPEED_SUPER) {
1198 u32 burst = dep->endpoint.maxburst - 1;
1199
1200 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
1201 }
1202
1203 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
1204 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
1205 | DWC3_DEPCFG_STREAM_EVENT_EN;
1206 dep->stream_capable = true;
1207 }
1208
1209 /* Set EP number */
1210 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
1211
1212 /* Set interrupter number for GSI endpoints */
1213 params.param1 |= DWC3_DEPCFG_INT_NUM(ep->ep_intr_num);
1214
1215 /* Enable XferInProgress and XferComplete Interrupts */
1216 params.param1 |= DWC3_DEPCFG_XFER_COMPLETE_EN;
1217 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
1218 params.param1 |= DWC3_DEPCFG_FIFO_ERROR_EN;
1219 /*
1220 * We must use the lower 16 TX FIFOs even though
1221 * HW might have more
1222 */
1223 /* Remove FIFO Number for GSI EP*/
1224 if (dep->direction)
1225 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
1226
1227 params.param0 |= DWC3_DEPCFG_ACTION_INIT;
1228
1229 dev_dbg(mdwc->dev, "Set EP config to params = %x %x %x, for %s\n",
1230 params.param0, params.param1, params.param2, dep->name);
1231
Mayank Rana83ad5822016-08-09 14:17:22 -07001232 dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -07001233
1234 /* Set XferRsc Index for GSI EP */
1235 if (!(dep->flags & DWC3_EP_ENABLED)) {
Mayank Ranaac1200c2017-04-25 13:48:46 -07001236 ret = dwc3_gadget_resize_tx_fifos(dwc, dep);
1237 if (ret)
1238 return;
1239
Mayank Rana511f3b22016-08-02 12:00:11 -07001240 memset(&params, 0x00, sizeof(params));
1241 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
Mayank Rana83ad5822016-08-09 14:17:22 -07001242 dwc3_send_gadget_ep_cmd(dep,
Mayank Rana511f3b22016-08-02 12:00:11 -07001243 DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
1244
1245 dep->endpoint.desc = desc;
1246 dep->comp_desc = comp_desc;
1247 dep->type = usb_endpoint_type(desc);
1248 dep->flags |= DWC3_EP_ENABLED;
1249 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
1250 reg |= DWC3_DALEPENA_EP(dep->number);
1251 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
1252 }
1253
1254}
1255
1256/*
1257* Enables USB wrapper for GSI
1258*
1259* @usb_ep - pointer to usb_ep instance.
1260*/
1261static void gsi_enable(struct usb_ep *ep)
1262{
1263 struct dwc3_ep *dep = to_dwc3_ep(ep);
1264 struct dwc3 *dwc = dep->dwc;
1265 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1266
1267 dwc3_msm_write_reg_field(mdwc->base,
1268 GSI_GENERAL_CFG_REG, GSI_CLK_EN_MASK, 1);
1269 dwc3_msm_write_reg_field(mdwc->base,
1270 GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 1);
1271 dwc3_msm_write_reg_field(mdwc->base,
1272 GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 0);
1273 dev_dbg(mdwc->dev, "%s: Enable GSI\n", __func__);
1274 dwc3_msm_write_reg_field(mdwc->base,
1275 GSI_GENERAL_CFG_REG, GSI_EN_MASK, 1);
1276}
1277
1278/*
1279* Block or allow doorbell towards GSI
1280*
1281* @usb_ep - pointer to usb_ep instance.
 1282* @block_db - true to block the doorbell towards GSI, false to allow it.
1284*/
1285static void gsi_set_clear_dbell(struct usb_ep *ep,
1286 bool block_db)
1287{
1288
1289 struct dwc3_ep *dep = to_dwc3_ep(ep);
1290 struct dwc3 *dwc = dep->dwc;
1291 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1292
1293 dwc3_msm_write_reg_field(mdwc->base,
1294 GSI_GENERAL_CFG_REG, BLOCK_GSI_WR_GO_MASK, block_db);
1295}
1296
1297/*
1298* Performs necessary checks before stopping GSI channels
1299*
1300* @usb_ep - pointer to usb_ep instance to access DWC3 regs
1301*/
1302static bool gsi_check_ready_to_suspend(struct usb_ep *ep, bool f_suspend)
1303{
1304 u32 timeout = 1500;
1305 u32 reg = 0;
1306 struct dwc3_ep *dep = to_dwc3_ep(ep);
1307 struct dwc3 *dwc = dep->dwc;
1308 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1309
1310 while (dwc3_msm_read_reg_field(mdwc->base,
1311 GSI_IF_STS, GSI_WR_CTRL_STATE_MASK)) {
1312 if (!timeout--) {
1313 dev_err(mdwc->dev,
1314 "Unable to suspend GSI ch. WR_CTRL_STATE != 0\n");
1315 return false;
1316 }
1317 }
1318 /* Check for U3 only if we are not handling Function Suspend */
1319 if (!f_suspend) {
1320 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1321 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U3) {
1322 dev_err(mdwc->dev, "Unable to suspend GSI ch\n");
1323 return false;
1324 }
1325 }
1326
1327 return true;
1328}
1329
1330
1331/**
1332* Performs GSI operations or GSI EP related operations.
1333*
1334* @usb_ep - pointer to usb_ep instance.
1335* @op_data - pointer to opcode related data.
1336* @op - GSI related or GSI EP related op code.
1337*
1338* @return int - 0 on success, negative on error.
1339* Also returns XferRscIdx for GSI_EP_OP_GET_XFER_IDX.
1340*/
1341static int dwc3_msm_gsi_ep_op(struct usb_ep *ep,
1342 void *op_data, enum gsi_ep_op op)
1343{
1344 u32 ret = 0;
1345 struct dwc3_ep *dep = to_dwc3_ep(ep);
1346 struct dwc3 *dwc = dep->dwc;
1347 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1348 struct usb_gsi_request *request;
1349 struct gsi_channel_info *ch_info;
1350 bool block_db, f_suspend;
Mayank Rana8432c362016-09-30 18:41:17 -07001351 unsigned long flags;
Mayank Rana511f3b22016-08-02 12:00:11 -07001352
1353 switch (op) {
1354 case GSI_EP_OP_PREPARE_TRBS:
1355 request = (struct usb_gsi_request *)op_data;
1356 dev_dbg(mdwc->dev, "EP_OP_PREPARE_TRBS for %s\n", ep->name);
1357 ret = gsi_prepare_trbs(ep, request);
1358 break;
1359 case GSI_EP_OP_FREE_TRBS:
1360 dev_dbg(mdwc->dev, "EP_OP_FREE_TRBS for %s\n", ep->name);
1361 gsi_free_trbs(ep);
1362 break;
1363 case GSI_EP_OP_CONFIG:
1364 request = (struct usb_gsi_request *)op_data;
1365 dev_dbg(mdwc->dev, "EP_OP_CONFIG for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001366 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001367 gsi_configure_ep(ep, request);
Mayank Rana8432c362016-09-30 18:41:17 -07001368 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001369 break;
1370 case GSI_EP_OP_STARTXFER:
1371 dev_dbg(mdwc->dev, "EP_OP_STARTXFER for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001372 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001373 ret = gsi_startxfer_for_ep(ep);
Mayank Rana8432c362016-09-30 18:41:17 -07001374 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001375 break;
1376 case GSI_EP_OP_GET_XFER_IDX:
1377 dev_dbg(mdwc->dev, "EP_OP_GET_XFER_IDX for %s\n", ep->name);
1378 ret = gsi_get_xfer_index(ep);
1379 break;
1380 case GSI_EP_OP_STORE_DBL_INFO:
1381 dev_dbg(mdwc->dev, "EP_OP_STORE_DBL_INFO\n");
1382 gsi_store_ringbase_dbl_info(ep, *((u32 *)op_data));
1383 break;
1384 case GSI_EP_OP_ENABLE_GSI:
1385 dev_dbg(mdwc->dev, "EP_OP_ENABLE_GSI\n");
1386 gsi_enable(ep);
1387 break;
1388 case GSI_EP_OP_GET_CH_INFO:
1389 ch_info = (struct gsi_channel_info *)op_data;
1390 gsi_get_channel_info(ep, ch_info);
1391 break;
Mayank Rana64d136b2016-11-01 21:01:34 -07001392 case GSI_EP_OP_RING_DB:
Mayank Rana511f3b22016-08-02 12:00:11 -07001393 request = (struct usb_gsi_request *)op_data;
Mayank Rana64d136b2016-11-01 21:01:34 -07001394 dbg_print(0xFF, "RING_DB", 0, ep->name);
1395 gsi_ring_db(ep, request);
Mayank Rana511f3b22016-08-02 12:00:11 -07001396 break;
1397 case GSI_EP_OP_UPDATEXFER:
1398 request = (struct usb_gsi_request *)op_data;
1399 dev_dbg(mdwc->dev, "EP_OP_UPDATEXFER\n");
Mayank Rana8432c362016-09-30 18:41:17 -07001400 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001401 ret = gsi_updatexfer_for_ep(ep, request);
Mayank Rana8432c362016-09-30 18:41:17 -07001402 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001403 break;
1404 case GSI_EP_OP_ENDXFER:
1405 request = (struct usb_gsi_request *)op_data;
1406 dev_dbg(mdwc->dev, "EP_OP_ENDXFER for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001407 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001408 gsi_endxfer_for_ep(ep);
Mayank Rana8432c362016-09-30 18:41:17 -07001409 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001410 break;
1411 case GSI_EP_OP_SET_CLR_BLOCK_DBL:
1412 block_db = *((bool *)op_data);
1413 dev_dbg(mdwc->dev, "EP_OP_SET_CLR_BLOCK_DBL %d\n",
1414 block_db);
1415 gsi_set_clear_dbell(ep, block_db);
1416 break;
1417 case GSI_EP_OP_CHECK_FOR_SUSPEND:
1418 dev_dbg(mdwc->dev, "EP_OP_CHECK_FOR_SUSPEND\n");
1419 f_suspend = *((bool *)op_data);
1420 ret = gsi_check_ready_to_suspend(ep, f_suspend);
1421 break;
1422 case GSI_EP_OP_DISABLE:
1423 dev_dbg(mdwc->dev, "EP_OP_DISABLE\n");
1424 ret = ep->ops->disable(ep);
1425 break;
1426 default:
1427 dev_err(mdwc->dev, "%s: Invalid opcode GSI EP\n", __func__);
1428 }
1429
1430 return ret;
1431}
1432
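/*
 * Illustrative GSI endpoint bring-up sequence as seen through the op
 * dispatcher above (a sketch of a hypothetical client; the actual ordering
 * is owned by the IPA/GSI function driver):
 *
 *	ep->ops->gsi_ep_op(ep, req, GSI_EP_OP_PREPARE_TRBS);
 *	ep->ops->gsi_ep_op(ep, req, GSI_EP_OP_CONFIG);
 *	ep->ops->gsi_ep_op(ep, NULL, GSI_EP_OP_STARTXFER);
 *	ep->ops->gsi_ep_op(ep, &gsi_db_addr, GSI_EP_OP_STORE_DBL_INFO);
 *	ep->ops->gsi_ep_op(ep, NULL, GSI_EP_OP_ENABLE_GSI);
 */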
1433/**
1434 * Configure MSM endpoint.
1435 * This function does configurations specific
 1436 * to an endpoint which needs a specific implementation
1437 * in the MSM architecture.
 1438 *
1439 * This function should be called by a usb function/class
 1440 * layer which needs support from the MSM-specific HW
 1441 * wrapping the USB3 core (like GSI or DBM specific endpoints).
1442 *
1443 * @ep - a pointer to some usb_ep instance
1444 *
 1445 * @return int - 0 on success, negative on error.
1446 */
1447int msm_ep_config(struct usb_ep *ep)
1448{
1449 struct dwc3_ep *dep = to_dwc3_ep(ep);
1450 struct dwc3 *dwc = dep->dwc;
1451 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1452 struct usb_ep_ops *new_ep_ops;
1453
1454
1455 /* Save original ep ops for future restore*/
1456 if (mdwc->original_ep_ops[dep->number]) {
1457 dev_err(mdwc->dev,
1458 "ep [%s,%d] already configured as msm endpoint\n",
1459 ep->name, dep->number);
1460 return -EPERM;
1461 }
1462 mdwc->original_ep_ops[dep->number] = ep->ops;
1463
1464 /* Set new usb ops as we like */
1465 new_ep_ops = kzalloc(sizeof(struct usb_ep_ops), GFP_ATOMIC);
1466 if (!new_ep_ops)
1467 return -ENOMEM;
1468
1469 (*new_ep_ops) = (*ep->ops);
1470 new_ep_ops->queue = dwc3_msm_ep_queue;
1471 new_ep_ops->gsi_ep_op = dwc3_msm_gsi_ep_op;
1472 ep->ops = new_ep_ops;
1473
1474 /*
 1475	 * Do any additional MSM-specific usb endpoint
 1476	 * configuration here.
1477 */
1478
1479 return 0;
1480}
1481EXPORT_SYMBOL(msm_ep_config);
1482
1483/**
1484 * Un-configure MSM endpoint.
1485 * Tear down configurations done in the
1486 * msm_ep_config function.
1487 *
1488 * @ep - a pointer to some usb_ep instance
1489 *
1490 * @return int - 0 on success, negative on error.
1491 */
1492int msm_ep_unconfig(struct usb_ep *ep)
1493{
1494 struct dwc3_ep *dep = to_dwc3_ep(ep);
1495 struct dwc3 *dwc = dep->dwc;
1496 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1497 struct usb_ep_ops *old_ep_ops;
1498
1499 /* Restore original ep ops */
1500 if (!mdwc->original_ep_ops[dep->number]) {
1501 dev_err(mdwc->dev,
1502 "ep [%s,%d] was not configured as msm endpoint\n",
1503 ep->name, dep->number);
1504 return -EINVAL;
1505 }
1506 old_ep_ops = (struct usb_ep_ops *)ep->ops;
1507 ep->ops = mdwc->original_ep_ops[dep->number];
1508 mdwc->original_ep_ops[dep->number] = NULL;
1509 kfree(old_ep_ops);
1510
1511 /*
 1512	 * Do any additional MSM-specific usb endpoint
1513	 * un-configuration here.
1514 */
1515
1516 return 0;
1517}
1518EXPORT_SYMBOL(msm_ep_unconfig);
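/*
 * Illustrative pairing of msm_ep_config()/msm_ep_unconfig() by a function
 * driver (a sketch; real callers add error handling): the ep ops are swapped
 * in before any DBM/GSI specific queuing and restored once the endpoint is
 * no longer used in MSM-specific mode.
 *
 *	ret = msm_ep_config(ep);
 *	if (!ret) {
 *		... use dwc3_msm_ep_queue()/gsi_ep_op() via ep->ops ...
 *		msm_ep_unconfig(ep);
 *	}
 */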
1519#endif /* (CONFIG_USB_DWC3_GADGET) || (CONFIG_USB_DWC3_DUAL_ROLE) */
1520
1521static void dwc3_resume_work(struct work_struct *w);
1522
1523static void dwc3_restart_usb_work(struct work_struct *w)
1524{
1525 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
1526 restart_usb_work);
1527 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1528 unsigned int timeout = 50;
1529
1530 dev_dbg(mdwc->dev, "%s\n", __func__);
1531
1532 if (atomic_read(&dwc->in_lpm) || !dwc->is_drd) {
1533 dev_dbg(mdwc->dev, "%s failed!!!\n", __func__);
1534 return;
1535 }
1536
1537 /* guard against concurrent VBUS handling */
1538 mdwc->in_restart = true;
1539
1540 if (!mdwc->vbus_active) {
1541 dev_dbg(mdwc->dev, "%s bailing out in disconnect\n", __func__);
1542 dwc->err_evt_seen = false;
1543 mdwc->in_restart = false;
1544 return;
1545 }
1546
Mayank Rana08e41922017-03-02 15:25:48 -08001547 dbg_event(0xFF, "RestartUSB", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07001548 /* Reset active USB connection */
1549 dwc3_resume_work(&mdwc->resume_work);
1550
1551 /* Make sure disconnect is processed before sending connect */
1552 while (--timeout && !pm_runtime_suspended(mdwc->dev))
1553 msleep(20);
1554
1555 if (!timeout) {
1556 dev_dbg(mdwc->dev,
1557 "Not in LPM after disconnect, forcing suspend...\n");
Mayank Rana08e41922017-03-02 15:25:48 -08001558 dbg_event(0xFF, "ReStart:RT SUSP",
1559 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07001560 pm_runtime_suspend(mdwc->dev);
1561 }
1562
Vijayavardhan Vennapusa5e5680e2016-11-25 11:25:35 +05301563 mdwc->in_restart = false;
Mayank Rana511f3b22016-08-02 12:00:11 -07001564 /* Force reconnect only if cable is still connected */
Vijayavardhan Vennapusa5e5680e2016-11-25 11:25:35 +05301565 if (mdwc->vbus_active)
Mayank Rana511f3b22016-08-02 12:00:11 -07001566 dwc3_resume_work(&mdwc->resume_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07001567
1568 dwc->err_evt_seen = false;
1569 flush_delayed_work(&mdwc->sm_work);
1570}
1571
Manu Gautam976fdfc2016-08-18 09:27:35 +05301572static int msm_dwc3_usbdev_notify(struct notifier_block *self,
1573 unsigned long action, void *priv)
1574{
1575 struct dwc3_msm *mdwc = container_of(self, struct dwc3_msm, usbdev_nb);
1576 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1577 struct usb_bus *bus = priv;
1578
1579 /* Interested only in recovery when HC dies */
1580 if (action != USB_BUS_DIED)
1581 return 0;
1582
1583 dev_dbg(mdwc->dev, "%s initiate recovery from hc_died\n", __func__);
1584 /* Recovery already under process */
1585 if (mdwc->hc_died)
1586 return 0;
1587
1588 if (bus->controller != &dwc->xhci->dev) {
1589 dev_dbg(mdwc->dev, "%s event for diff HCD\n", __func__);
1590 return 0;
1591 }
1592
1593 mdwc->hc_died = true;
1594 schedule_delayed_work(&mdwc->sm_work, 0);
1595 return 0;
1596}
1597
1598
Mayank Rana511f3b22016-08-02 12:00:11 -07001599/*
1600 * Check whether the DWC3 requires resetting the ep
1601 * after going to Low Power Mode (lpm)
1602 */
1603bool msm_dwc3_reset_ep_after_lpm(struct usb_gadget *gadget)
1604{
1605 struct dwc3 *dwc = container_of(gadget, struct dwc3, gadget);
1606 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1607
1608 return dbm_reset_ep_after_lpm(mdwc->dbm);
1609}
1610EXPORT_SYMBOL(msm_dwc3_reset_ep_after_lpm);
1611
1612/*
1613 * Config Global Distributed Switch Controller (GDSC)
1614 * to support controller power collapse
1615 */
1616static int dwc3_msm_config_gdsc(struct dwc3_msm *mdwc, int on)
1617{
1618 int ret;
1619
1620 if (IS_ERR_OR_NULL(mdwc->dwc3_gdsc))
1621 return -EPERM;
1622
1623 if (on) {
1624 ret = regulator_enable(mdwc->dwc3_gdsc);
1625 if (ret) {
1626 dev_err(mdwc->dev, "unable to enable usb3 gdsc\n");
1627 return ret;
1628 }
1629 } else {
1630 ret = regulator_disable(mdwc->dwc3_gdsc);
1631 if (ret) {
1632 dev_err(mdwc->dev, "unable to disable usb3 gdsc\n");
1633 return ret;
1634 }
1635 }
1636
1637 return ret;
1638}
1639
1640static int dwc3_msm_link_clk_reset(struct dwc3_msm *mdwc, bool assert)
1641{
1642 int ret = 0;
1643
1644 if (assert) {
Mayank Ranad339abe2017-05-31 09:19:49 -07001645 disable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
Mayank Rana511f3b22016-08-02 12:00:11 -07001646 /* Using asynchronous block reset to the hardware */
1647 dev_dbg(mdwc->dev, "block_reset ASSERT\n");
1648 clk_disable_unprepare(mdwc->utmi_clk);
1649 clk_disable_unprepare(mdwc->sleep_clk);
1650 clk_disable_unprepare(mdwc->core_clk);
1651 clk_disable_unprepare(mdwc->iface_clk);
Amit Nischal4d278212016-06-06 17:54:34 +05301652 ret = reset_control_assert(mdwc->core_reset);
Mayank Rana511f3b22016-08-02 12:00:11 -07001653 if (ret)
Amit Nischal4d278212016-06-06 17:54:34 +05301654 dev_err(mdwc->dev, "dwc3 core_reset assert failed\n");
Mayank Rana511f3b22016-08-02 12:00:11 -07001655 } else {
1656 dev_dbg(mdwc->dev, "block_reset DEASSERT\n");
Amit Nischal4d278212016-06-06 17:54:34 +05301657 ret = reset_control_deassert(mdwc->core_reset);
1658 if (ret)
1659 dev_err(mdwc->dev, "dwc3 core_reset deassert failed\n");
Mayank Rana511f3b22016-08-02 12:00:11 -07001660 ndelay(200);
1661 clk_prepare_enable(mdwc->iface_clk);
1662 clk_prepare_enable(mdwc->core_clk);
1663 clk_prepare_enable(mdwc->sleep_clk);
1664 clk_prepare_enable(mdwc->utmi_clk);
Mayank Ranad339abe2017-05-31 09:19:49 -07001665 enable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
Mayank Rana511f3b22016-08-02 12:00:11 -07001666 }
1667
1668 return ret;
1669}
1670
1671static void dwc3_msm_update_ref_clk(struct dwc3_msm *mdwc)
1672{
1673 u32 guctl, gfladj = 0;
1674
1675 guctl = dwc3_msm_read_reg(mdwc->base, DWC3_GUCTL);
1676 guctl &= ~DWC3_GUCTL_REFCLKPER;
1677
1678 /* GFLADJ register is used starting with revision 2.50a */
1679 if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) >= DWC3_REVISION_250A) {
1680 gfladj = dwc3_msm_read_reg(mdwc->base, DWC3_GFLADJ);
1681 gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
1682 gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZ_DECR;
1683 gfladj &= ~DWC3_GFLADJ_REFCLK_LPM_SEL;
1684 gfladj &= ~DWC3_GFLADJ_REFCLK_FLADJ;
1685 }
1686
1687 /* Refer to SNPS Databook Table 6-55 for calculations used */
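	/*
	 * Worked example of those values (per the databook description of the
	 * fields): GUCTL.REFCLKPER is the ref_clk period in ns, so
	 * 1e9 / 19.2e6 ~= 52 and 1e9 / 24e6 ~= 41. GFLADJ.REFCLK_240MHZ_DECR
	 * is 240 MHz divided by ref_clk: 240 / 19.2 = 12.5, programmed as 12
	 * plus the 240MHZDECR_PLS1 (+0.5) bit, and 240 / 24 = 10 exactly.
	 */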
1688 switch (mdwc->utmi_clk_rate) {
1689 case 19200000:
1690 guctl |= 52 << __ffs(DWC3_GUCTL_REFCLKPER);
1691 gfladj |= 12 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
1692 gfladj |= DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
1693 gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
1694 gfladj |= 200 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
1695 break;
1696 case 24000000:
1697 guctl |= 41 << __ffs(DWC3_GUCTL_REFCLKPER);
1698 gfladj |= 10 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
1699 gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
1700 gfladj |= 2032 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
1701 break;
1702 default:
1703 dev_warn(mdwc->dev, "Unsupported utmi_clk_rate: %u\n",
1704 mdwc->utmi_clk_rate);
1705 break;
1706 }
1707
1708 dwc3_msm_write_reg(mdwc->base, DWC3_GUCTL, guctl);
1709 if (gfladj)
1710 dwc3_msm_write_reg(mdwc->base, DWC3_GFLADJ, gfladj);
1711}
1712
1713/* Initialize QSCRATCH registers for HSPHY and SSPHY operation */
1714static void dwc3_msm_qscratch_reg_init(struct dwc3_msm *mdwc)
1715{
1716 if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) < DWC3_REVISION_250A)
1717 /* On older cores set XHCI_REV bit to specify revision 1.0 */
1718 dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
1719 BIT(2), 1);
1720
1721 /*
1722 * Enable master clock for RAMs to allow BAM to access RAMs when
1723 * RAM clock gating is enabled via DWC3's GCTL. Otherwise issues
1724 * are seen where RAM clocks get turned OFF in SS mode
1725 */
1726 dwc3_msm_write_reg(mdwc->base, CGCTL_REG,
1727 dwc3_msm_read_reg(mdwc->base, CGCTL_REG) | 0x18);
1728
1729}
1730
Jack Pham4b8b4ae2016-08-09 11:36:34 -07001731static void dwc3_msm_vbus_draw_work(struct work_struct *w)
1732{
1733 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
1734 vbus_draw_work);
1735 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1736
1737 dwc3_msm_gadget_vbus_draw(mdwc, dwc->vbus_draw);
1738}
1739
Mayank Rana511f3b22016-08-02 12:00:11 -07001740static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event)
1741{
1742 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
Mayank Ranaf4918d32016-12-15 13:35:55 -08001743 struct dwc3_event_buffer *evt;
Mayank Rana511f3b22016-08-02 12:00:11 -07001744 u32 reg;
Mayank Ranaf4918d32016-12-15 13:35:55 -08001745 int i;
Mayank Rana511f3b22016-08-02 12:00:11 -07001746
1747 switch (event) {
1748 case DWC3_CONTROLLER_ERROR_EVENT:
1749 dev_info(mdwc->dev,
1750 "DWC3_CONTROLLER_ERROR_EVENT received, irq cnt %lu\n",
1751 dwc->irq_cnt);
1752
1753 dwc3_gadget_disable_irq(dwc);
1754
1755 /* prevent core from generating interrupts until recovery */
1756 reg = dwc3_msm_read_reg(mdwc->base, DWC3_GCTL);
1757 reg |= DWC3_GCTL_CORESOFTRESET;
1758 dwc3_msm_write_reg(mdwc->base, DWC3_GCTL, reg);
1759
1760 /* restart USB which performs full reset and reconnect */
1761 schedule_work(&mdwc->restart_usb_work);
1762 break;
1763 case DWC3_CONTROLLER_RESET_EVENT:
1764 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESET_EVENT received\n");
1765 /* HS & SSPHYs get reset as part of core soft reset */
1766 dwc3_msm_qscratch_reg_init(mdwc);
1767 break;
1768 case DWC3_CONTROLLER_POST_RESET_EVENT:
1769 dev_dbg(mdwc->dev,
1770 "DWC3_CONTROLLER_POST_RESET_EVENT received\n");
1771
1772 /*
1773 * The sequence below is used when the controller operates without
Vijayavardhan Vennapusa64d7a522016-10-21 15:02:09 +05301774 * an SS PHY and only USB high/full speed is supported.
Mayank Rana511f3b22016-08-02 12:00:11 -07001775 */
Vijayavardhan Vennapusa64d7a522016-10-21 15:02:09 +05301776 if (dwc->maximum_speed == USB_SPEED_HIGH ||
1777 dwc->maximum_speed == USB_SPEED_FULL) {
Mayank Rana511f3b22016-08-02 12:00:11 -07001778 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1779 dwc3_msm_read_reg(mdwc->base,
1780 QSCRATCH_GENERAL_CFG)
1781 | PIPE_UTMI_CLK_DIS);
1782
1783 usleep_range(2, 5);
1784
1785
1786 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1787 dwc3_msm_read_reg(mdwc->base,
1788 QSCRATCH_GENERAL_CFG)
1789 | PIPE_UTMI_CLK_SEL
1790 | PIPE3_PHYSTATUS_SW);
1791
1792 usleep_range(2, 5);
1793
1794 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1795 dwc3_msm_read_reg(mdwc->base,
1796 QSCRATCH_GENERAL_CFG)
1797 & ~PIPE_UTMI_CLK_DIS);
1798 }
1799
1800 dwc3_msm_update_ref_clk(mdwc);
1801 dwc->tx_fifo_size = mdwc->tx_fifo_size;
1802 break;
1803 case DWC3_CONTROLLER_CONNDONE_EVENT:
1804 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_CONNDONE_EVENT received\n");
1805 /*
1806 * Enable the L1-exit power event interrupt if the DBM indicates
1807 * coming out of L1 by interrupt
1808 */
1809 if (mdwc->dbm && dbm_l1_lpm_interrupt(mdwc->dbm))
1810 dwc3_msm_write_reg_field(mdwc->base,
1811 PWR_EVNT_IRQ_MASK_REG,
1812 PWR_EVNT_LPM_OUT_L1_MASK, 1);
1813
1814 atomic_set(&dwc->in_lpm, 0);
1815 break;
1816 case DWC3_CONTROLLER_NOTIFY_OTG_EVENT:
1817 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_NOTIFY_OTG_EVENT received\n");
1818 if (dwc->enable_bus_suspend) {
1819 mdwc->suspend = dwc->b_suspend;
1820 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
1821 }
1822 break;
1823 case DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT:
1824 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT received\n");
Jack Pham4b8b4ae2016-08-09 11:36:34 -07001825 schedule_work(&mdwc->vbus_draw_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07001826 break;
1827 case DWC3_CONTROLLER_RESTART_USB_SESSION:
1828 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESTART_USB_SESSION received\n");
Hemant Kumar43874172016-08-25 16:17:48 -07001829 schedule_work(&mdwc->restart_usb_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07001830 break;
Mayank Ranaf4918d32016-12-15 13:35:55 -08001831 case DWC3_GSI_EVT_BUF_ALLOC:
1832 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_ALLOC\n");
1833
1834 if (!mdwc->num_gsi_event_buffers)
1835 break;
1836
1837 mdwc->gsi_ev_buff = devm_kzalloc(dwc->dev,
1838 sizeof(*dwc->ev_buf) * mdwc->num_gsi_event_buffers,
1839 GFP_KERNEL);
1840 if (!mdwc->gsi_ev_buff) {
1841 dev_err(dwc->dev, "can't allocate gsi_ev_buff\n");
1842 break;
1843 }
1844
1845 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1846
1847 evt = devm_kzalloc(dwc->dev, sizeof(*evt), GFP_KERNEL);
1848 if (!evt)
1849 break;
1850 evt->dwc = dwc;
1851 evt->length = DWC3_EVENT_BUFFERS_SIZE;
1852 evt->buf = dma_alloc_coherent(dwc->dev,
1853 DWC3_EVENT_BUFFERS_SIZE,
1854 &evt->dma, GFP_KERNEL);
1855 if (!evt->buf) {
1856 dev_err(dwc->dev,
1857 "can't allocate gsi_evt_buf(%d)\n", i);
1858 break;
1859 }
1860 mdwc->gsi_ev_buff[i] = evt;
1861 }
1862 break;
1863 case DWC3_GSI_EVT_BUF_SETUP:
1864 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_SETUP\n");
1865 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1866 evt = mdwc->gsi_ev_buff[i];
1867 dev_dbg(mdwc->dev, "Evt buf %p dma %08llx length %d\n",
1868 evt->buf, (unsigned long long) evt->dma,
1869 evt->length);
1870 memset(evt->buf, 0, evt->length);
1871 evt->lpos = 0;
1872 /*
1873 * Primary event buffer is programmed with registers
1874 * DWC3_GEVNT*(0). Hence use DWC3_GEVNT*(i+1) to
1875 * program USB GSI related event buffer with DWC3
1876 * controller.
1877 */
1878 dwc3_writel(dwc->regs, DWC3_GEVNTADRLO((i+1)),
1879 lower_32_bits(evt->dma));
1880 dwc3_writel(dwc->regs, DWC3_GEVNTADRHI((i+1)),
1881 DWC3_GEVNTADRHI_EVNTADRHI_GSI_EN(
1882 DWC3_GEVENT_TYPE_GSI) |
1883 DWC3_GEVNTADRHI_EVNTADRHI_GSI_IDX((i+1)));
1884 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ((i+1)),
1885 DWC3_GEVNTCOUNT_EVNTINTRPTMASK |
1886 ((evt->length) & 0xffff));
1887 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT((i+1)), 0);
1888 }
1889 break;
1890 case DWC3_GSI_EVT_BUF_CLEANUP:
1891 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_CLEANUP\n");
1892 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1893 evt = mdwc->gsi_ev_buff[i];
1894 evt->lpos = 0;
1895 /*
1896 * Primary event buffer is programmed with registers
1897 * DWC3_GEVNT*(0). Hence use DWC3_GEVNT*(i+1) to
1898 * program USB GSI related event buffer with DWC3
1899 * controller.
1900 */
1901 dwc3_writel(dwc->regs, DWC3_GEVNTADRLO((i+1)), 0);
1902 dwc3_writel(dwc->regs, DWC3_GEVNTADRHI((i+1)), 0);
1903 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ((i+1)),
1904 DWC3_GEVNTSIZ_INTMASK |
1905 DWC3_GEVNTSIZ_SIZE((i+1)));
1906 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT((i+1)), 0);
1907 }
1908 break;
1909 case DWC3_GSI_EVT_BUF_FREE:
1910 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_FREE\n");
1911 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1912 evt = mdwc->gsi_ev_buff[i];
1913 if (evt)
1914 dma_free_coherent(dwc->dev, evt->length,
1915 evt->buf, evt->dma);
1916 }
1917 break;
Mayank Rana511f3b22016-08-02 12:00:11 -07001918 default:
1919 dev_dbg(mdwc->dev, "unknown dwc3 event\n");
1920 break;
1921 }
1922}
1923
1924static void dwc3_msm_block_reset(struct dwc3_msm *mdwc, bool core_reset)
1925{
1926 int ret = 0;
1927
1928 if (core_reset) {
1929 ret = dwc3_msm_link_clk_reset(mdwc, 1);
1930 if (ret)
1931 return;
1932
1933 usleep_range(1000, 1200);
1934 ret = dwc3_msm_link_clk_reset(mdwc, 0);
1935 if (ret)
1936 return;
1937
1938 usleep_range(10000, 12000);
1939 }
1940
1941 if (mdwc->dbm) {
1942 /* Reset the DBM */
1943 dbm_soft_reset(mdwc->dbm, 1);
1944 usleep_range(1000, 1200);
1945 dbm_soft_reset(mdwc->dbm, 0);
1946
1947 /* enable DBM */
1948 dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
1949 DBM_EN_MASK, 0x1);
1950 dbm_enable(mdwc->dbm);
1951 }
1952}
1953
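/*
 * Power-on-reset path taken when coming out of controller power collapse:
 * configure the AHB2PHY bridge for one-wait-state accesses, run the one-time
 * dwc3 core pre-init on first use, then re-run dwc3_core_init() and restore
 * the event buffers.
 */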
1954static void dwc3_msm_power_collapse_por(struct dwc3_msm *mdwc)
1955{
1956 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1957 u32 val;
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05301958 int ret;
Mayank Rana511f3b22016-08-02 12:00:11 -07001959
1960 /* Configure AHB2PHY for one wait state read/write */
1961 if (mdwc->ahb2phy_base) {
1962 clk_prepare_enable(mdwc->cfg_ahb_clk);
1963 val = readl_relaxed(mdwc->ahb2phy_base +
1964 PERIPH_SS_AHB2PHY_TOP_CFG);
1965 if (val != ONE_READ_WRITE_WAIT) {
1966 writel_relaxed(ONE_READ_WRITE_WAIT,
1967 mdwc->ahb2phy_base + PERIPH_SS_AHB2PHY_TOP_CFG);
1968 /* complete above write before configuring USB PHY. */
1969 mb();
1970 }
1971 clk_disable_unprepare(mdwc->cfg_ahb_clk);
1972 }
1973
1974 if (!mdwc->init) {
Mayank Rana08e41922017-03-02 15:25:48 -08001975 dbg_event(0xFF, "dwc3 init",
1976 atomic_read(&mdwc->dev->power.usage_count));
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05301977 ret = dwc3_core_pre_init(dwc);
1978 if (ret) {
1979 dev_err(mdwc->dev, "dwc3_core_pre_init failed\n");
1980 return;
1981 }
Mayank Rana511f3b22016-08-02 12:00:11 -07001982 mdwc->init = true;
1983 }
1984
1985 dwc3_core_init(dwc);
1986 /* Re-configure event buffers */
1987 dwc3_event_buffers_setup(dwc);
1988}
1989
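/*
 * Prepare the link and HS PHY for low power mode: require the SS link to be
 * in P3 when an active SuperSpeed session is up, clear stale L2 power
 * events, set ENBLSLPM/SUSPHY in GUSB2PHYCFG and poll up to ~5 ms for the
 * PHY to report L2 entry.
 */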
1990static int dwc3_msm_prepare_suspend(struct dwc3_msm *mdwc)
1991{
1992 unsigned long timeout;
1993 u32 reg = 0;
1994
1995 if ((mdwc->in_host_mode || mdwc->vbus_active)
Vijayavardhan Vennapusa8cf91a62016-09-01 12:05:50 +05301996 && dwc3_msm_is_superspeed(mdwc) && !mdwc->in_restart) {
Mayank Rana511f3b22016-08-02 12:00:11 -07001997 if (!atomic_read(&mdwc->in_p3)) {
1998 dev_err(mdwc->dev, "Not in P3, aborting LPM sequence\n");
1999 return -EBUSY;
2000 }
2001 }
2002
2003 /* Clear previous L2 events */
2004 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
2005 PWR_EVNT_LPM_IN_L2_MASK | PWR_EVNT_LPM_OUT_L2_MASK);
2006
2007 /* Prepare HSPHY for suspend */
2008 reg = dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0));
2009 dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
2010 reg | DWC3_GUSB2PHYCFG_ENBLSLPM | DWC3_GUSB2PHYCFG_SUSPHY);
2011
2012 /* Wait for PHY to go into L2 */
2013 timeout = jiffies + msecs_to_jiffies(5);
2014 while (!time_after(jiffies, timeout)) {
2015 reg = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
2016 if (reg & PWR_EVNT_LPM_IN_L2_MASK)
2017 break;
2018 }
2019 if (!(reg & PWR_EVNT_LPM_IN_L2_MASK))
2020 dev_err(mdwc->dev, "could not transition HS PHY to L2\n");
2021
2022 /* Clear L2 event bit */
2023 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
2024 PWR_EVNT_LPM_IN_L2_MASK);
2025
2026 return 0;
2027}
2028
Mayank Rana511f3b22016-08-02 12:00:11 -07002029static void dwc3_set_phy_speed_flags(struct dwc3_msm *mdwc)
2030{
2031 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2032 int i, num_ports;
2033 u32 reg;
2034
2035 mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
2036 if (mdwc->in_host_mode) {
2037 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
2038 num_ports = HCS_MAX_PORTS(reg);
2039 for (i = 0; i < num_ports; i++) {
2040 reg = dwc3_msm_read_reg(mdwc->base,
2041 USB3_PORTSC + i*0x10);
2042 if (reg & PORT_PE) {
2043 if (DEV_HIGHSPEED(reg) || DEV_FULLSPEED(reg))
2044 mdwc->hs_phy->flags |= PHY_HSFS_MODE;
2045 else if (DEV_LOWSPEED(reg))
2046 mdwc->hs_phy->flags |= PHY_LS_MODE;
2047 }
2048 }
2049 } else {
2050 if (dwc->gadget.speed == USB_SPEED_HIGH ||
2051 dwc->gadget.speed == USB_SPEED_FULL)
2052 mdwc->hs_phy->flags |= PHY_HSFS_MODE;
2053 else if (dwc->gadget.speed == USB_SPEED_LOW)
2054 mdwc->hs_phy->flags |= PHY_LS_MODE;
2055 }
2056}
2057
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05302058static void msm_dwc3_perf_vote_update(struct dwc3_msm *mdwc,
2059 bool perf_mode);
Mayank Rana511f3b22016-08-02 12:00:11 -07002060
Mayank Ranad339abe2017-05-31 09:19:49 -07002061static void configure_usb_wakeup_interrupt(struct dwc3_msm *mdwc,
2062 struct usb_irq *uirq, unsigned int polarity, bool enable)
2063{
2064 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2065
2066 if (uirq && enable && !uirq->enable) {
2067 dbg_event(0xFF, "PDC_IRQ_EN", uirq->irq);
2068 dbg_event(0xFF, "PDC_IRQ_POL", polarity);
2069 /* clear any pending interrupt */
2070 irq_set_irqchip_state(uirq->irq, IRQCHIP_STATE_PENDING, 0);
2071 irq_set_irq_type(uirq->irq, polarity);
2072 enable_irq_wake(uirq->irq);
2073 enable_irq(uirq->irq);
2074 uirq->enable = true;
2075 }
2076
2077 if (uirq && !enable && uirq->enable) {
2078 dbg_event(0xFF, "PDC_IRQ_DIS", uirq->irq);
2079 disable_irq_wake(uirq->irq);
2080 disable_irq_nosync(uirq->irq);
2081 uirq->enable = false;
2082 }
2083}
2084
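/*
 * Arm or disarm the PDC wakeup interrupts based on the HS PHY flags set by
 * dwc3_set_phy_speed_flags(): a falling edge on DM for LS mode, a falling
 * edge on DP for HS/FS mode, and rising edges on both DP and DM when no
 * speed flag is set, plus the level-high SS PHY wakeup interrupt.
 */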
2085static void enable_usb_pdc_interrupt(struct dwc3_msm *mdwc, bool enable)
2086{
2087 if (!enable)
2088 goto disable_usb_irq;
2089
2090 if (mdwc->hs_phy->flags & PHY_LS_MODE) {
2091 configure_usb_wakeup_interrupt(mdwc,
2092 &mdwc->wakeup_irq[DM_HS_PHY_IRQ],
2093 IRQ_TYPE_EDGE_FALLING, enable);
2094 } else if (mdwc->hs_phy->flags & PHY_HSFS_MODE) {
2095 configure_usb_wakeup_interrupt(mdwc,
2096 &mdwc->wakeup_irq[DP_HS_PHY_IRQ],
2097 IRQ_TYPE_EDGE_FALLING, enable);
2098 } else {
2099 configure_usb_wakeup_interrupt(mdwc,
2100 &mdwc->wakeup_irq[DP_HS_PHY_IRQ],
2101 IRQ_TYPE_EDGE_RISING, true);
2102 configure_usb_wakeup_interrupt(mdwc,
2103 &mdwc->wakeup_irq[DM_HS_PHY_IRQ],
2104 IRQ_TYPE_EDGE_RISING, true);
2105 }
2106
2107 configure_usb_wakeup_interrupt(mdwc,
2108 &mdwc->wakeup_irq[SS_PHY_IRQ],
2109 IRQF_TRIGGER_HIGH | IRQ_TYPE_LEVEL_HIGH, enable);
2110 return;
2111
2112disable_usb_irq:
2113 configure_usb_wakeup_interrupt(mdwc,
2114 &mdwc->wakeup_irq[DP_HS_PHY_IRQ], 0, enable);
2115 configure_usb_wakeup_interrupt(mdwc,
2116 &mdwc->wakeup_irq[DM_HS_PHY_IRQ], 0, enable);
2117 configure_usb_wakeup_interrupt(mdwc,
2118 &mdwc->wakeup_irq[SS_PHY_IRQ], 0, enable);
2119}
2120
2121static void configure_nonpdc_usb_interrupt(struct dwc3_msm *mdwc,
2122 struct usb_irq *uirq, bool enable)
2123{
2124 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2125
2126 if (uirq && enable && !uirq->enable) {
2127 dbg_event(0xFF, "IRQ_EN", uirq->irq);
2128 enable_irq_wake(uirq->irq);
2129 enable_irq(uirq->irq);
2130 uirq->enable = true;
2131 }
2132
2133 if (uirq && !enable && uirq->enable) {
2134 dbg_event(0xFF, "IRQ_DIS", uirq->irq);
2135 disable_irq_wake(uirq->irq);
2136 disable_irq_nosync(uirq->irq);
2137 uirq->enable = false;
2138 }
2139}
2140
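/*
 * Controller low power mode entry. Bails out if device events are still
 * pending or the gadget state does not allow LPM, then suspends the PHYs,
 * gates the clocks, power-collapses the controller when no cable/host
 * session is active, drops the bus bandwidth vote and, if a session is
 * active, arms the wakeup interrupts.
 */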
Mayank Rana511f3b22016-08-02 12:00:11 -07002141static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
2142{
Mayank Rana83ad5822016-08-09 14:17:22 -07002143 int ret;
Mayank Rana511f3b22016-08-02 12:00:11 -07002144 bool can_suspend_ssphy;
2145 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana83ad5822016-08-09 14:17:22 -07002146 struct dwc3_event_buffer *evt;
Mayank Ranad339abe2017-05-31 09:19:49 -07002147 struct usb_irq *uirq;
Mayank Rana511f3b22016-08-02 12:00:11 -07002148
2149 if (atomic_read(&dwc->in_lpm)) {
2150 dev_dbg(mdwc->dev, "%s: Already suspended\n", __func__);
2151 return 0;
2152 }
2153
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05302154 cancel_delayed_work_sync(&mdwc->perf_vote_work);
2155 msm_dwc3_perf_vote_update(mdwc, false);
2156
Mayank Rana511f3b22016-08-02 12:00:11 -07002157 if (!mdwc->in_host_mode) {
Mayank Rana83ad5822016-08-09 14:17:22 -07002158 evt = dwc->ev_buf;
2159 if ((evt->flags & DWC3_EVENT_PENDING)) {
2160 dev_dbg(mdwc->dev,
Mayank Rana511f3b22016-08-02 12:00:11 -07002161 "%s: %d device events pending, abort suspend\n",
2162 __func__, evt->count / 4);
Mayank Rana83ad5822016-08-09 14:17:22 -07002163 return -EBUSY;
Mayank Rana511f3b22016-08-02 12:00:11 -07002164 }
2165 }
2166
2167 if (!mdwc->vbus_active && dwc->is_drd &&
2168 mdwc->otg_state == OTG_STATE_B_PERIPHERAL) {
2169 /*
2170 * In some cases, pm_runtime_suspend may be called by usb_bam
2171 * while an lpm flag is still pending. However, if this happens
2172 * after the cable was disconnected but before the OTG state has
2173 * changed to IDLE, the OTG state machine is still running and we
2174 * would race against it. So cancel LPM for now; the OTG state
2175 * machine will go for LPM later, after completing the transition
2176 * to the IDLE state.
2177 */
2178 dev_dbg(mdwc->dev,
2179 "%s: cable disconnected while not in idle otg state\n",
2180 __func__);
2181 return -EBUSY;
2182 }
2183
2184 /*
2185 * For the device bus suspend case, do not enter LPM if the
2186 * gadget has not reached the CONFIGURED state; abort the LPM
2187 * sequence instead.
2188 */
2189 if ((dwc->is_drd && mdwc->otg_state == OTG_STATE_B_SUSPEND) &&
2190 (dwc->gadget.state != USB_STATE_CONFIGURED)) {
2191 pr_err("%s(): Trying to go in LPM with state:%d\n",
2192 __func__, dwc->gadget.state);
2193 pr_err("%s(): LPM is not performed.\n", __func__);
2194 return -EBUSY;
2195 }
2196
2197 ret = dwc3_msm_prepare_suspend(mdwc);
2198 if (ret)
2199 return ret;
2200
2201 /* Initialize variables here */
2202 can_suspend_ssphy = !(mdwc->in_host_mode &&
2203 dwc3_msm_is_host_superspeed(mdwc));
2204
2205 /* Disable core irq */
2206 if (dwc->irq)
2207 disable_irq(dwc->irq);
2208
Mayank Ranaf616a7f2017-03-20 16:10:39 -07002209 if (work_busy(&dwc->bh_work))
2210 dbg_event(0xFF, "pend evt", 0);
2211
Mayank Rana511f3b22016-08-02 12:00:11 -07002212 /* disable power event irq, hs and ss phy irq is used as wake up src */
Mayank Ranad339abe2017-05-31 09:19:49 -07002213 disable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
Mayank Rana511f3b22016-08-02 12:00:11 -07002214
2215 dwc3_set_phy_speed_flags(mdwc);
2216 /* Suspend HS PHY */
2217 usb_phy_set_suspend(mdwc->hs_phy, 1);
2218
2219 /* Suspend SS PHY */
Hemant Kumarde1df692016-04-26 19:36:48 -07002220 if (dwc->maximum_speed == USB_SPEED_SUPER && can_suspend_ssphy) {
Mayank Rana511f3b22016-08-02 12:00:11 -07002221 /* indicate phy about SS mode */
2222 if (dwc3_msm_is_superspeed(mdwc))
2223 mdwc->ss_phy->flags |= DEVICE_IN_SS_MODE;
2224 usb_phy_set_suspend(mdwc->ss_phy, 1);
2225 mdwc->lpm_flags |= MDWC3_SS_PHY_SUSPEND;
2226 }
2227
2228 /* make sure above writes are completed before turning off clocks */
2229 wmb();
2230
2231 /* Disable clocks */
2232 if (mdwc->bus_aggr_clk)
2233 clk_disable_unprepare(mdwc->bus_aggr_clk);
2234 clk_disable_unprepare(mdwc->utmi_clk);
2235
Hemant Kumar633dc332016-08-10 13:41:05 -07002236 /* Memory core: OFF, Memory periphery: OFF */
2237 if (!mdwc->in_host_mode && !mdwc->vbus_active) {
2238 clk_set_flags(mdwc->core_clk, CLKFLAG_NORETAIN_MEM);
2239 clk_set_flags(mdwc->core_clk, CLKFLAG_NORETAIN_PERIPH);
2240 }
2241
Mayank Rana511f3b22016-08-02 12:00:11 -07002242 clk_set_rate(mdwc->core_clk, 19200000);
2243 clk_disable_unprepare(mdwc->core_clk);
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05302244 if (mdwc->noc_aggr_clk)
2245 clk_disable_unprepare(mdwc->noc_aggr_clk);
Mayank Rana511f3b22016-08-02 12:00:11 -07002246 /*
2247 * Disable iface_clk only after core_clk as core_clk has FSM
2248 * dependency on iface_clk. Hence iface_clk should be turned off
2249 * after core_clk is turned off.
2250 */
2251 clk_disable_unprepare(mdwc->iface_clk);
2252 /* USB PHY no more requires TCXO */
2253 clk_disable_unprepare(mdwc->xo_clk);
2254
2255 /* Perform controller power collapse */
Azhar Shaikh69f4c052016-02-11 11:00:58 -08002256 if (!mdwc->in_host_mode && (!mdwc->vbus_active || mdwc->in_restart)) {
Mayank Rana511f3b22016-08-02 12:00:11 -07002257 mdwc->lpm_flags |= MDWC3_POWER_COLLAPSE;
2258 dev_dbg(mdwc->dev, "%s: power collapse\n", __func__);
2259 dwc3_msm_config_gdsc(mdwc, 0);
2260 clk_disable_unprepare(mdwc->sleep_clk);
Jack Phambbe27962017-03-23 18:42:26 -07002261
Jack Pham9faa51df2017-04-03 18:13:40 -07002262 if (mdwc->iommu_map) {
Jack Phambbe27962017-03-23 18:42:26 -07002263 arm_iommu_detach_device(mdwc->dev);
Jack Pham9faa51df2017-04-03 18:13:40 -07002264 dev_dbg(mdwc->dev, "IOMMU detached\n");
2265 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002266 }
2267
2268 /* Remove bus voting */
2269 if (mdwc->bus_perf_client) {
Mayank Ranaca9f3182017-04-13 17:44:14 -07002270 dbg_event(0xFF, "bus_devote_start", 0);
2271 ret = msm_bus_scale_client_update_request(
2272 mdwc->bus_perf_client, 0);
2273 dbg_event(0xFF, "bus_devote_finish", 0);
2274 if (ret)
2275 dev_err(mdwc->dev, "bus bw unvoting failed %d\n", ret);
Mayank Rana511f3b22016-08-02 12:00:11 -07002276 }
2277
2278 /*
2279 * Release the wakeup source with a timeout so system suspend is
2280 * deferred long enough to handle the case where both SUSPEND and
2281 * DISCONNECT events arrive on a USB cable disconnect.
2282 */
2283 if (mdwc->lpm_to_suspend_delay) {
2284 dev_dbg(mdwc->dev, "defer suspend with %d(msecs)\n",
2285 mdwc->lpm_to_suspend_delay);
2286 pm_wakeup_event(mdwc->dev, mdwc->lpm_to_suspend_delay);
2287 } else {
2288 pm_relax(mdwc->dev);
2289 }
2290
2291 atomic_set(&dwc->in_lpm, 1);
2292
2293 /*
2294 * With DCP or during cable disconnect, we don't require wakeup
2295 * using HS_PHY_IRQ or SS_PHY_IRQ. Hence enable wakeup only in
2296 * case of host bus suspend and device bus suspend.
2297 */
2298 if (mdwc->vbus_active || mdwc->in_host_mode) {
Mayank Ranad339abe2017-05-31 09:19:49 -07002299 if (mdwc->use_pdc_interrupts) {
2300 enable_usb_pdc_interrupt(mdwc, true);
2301 } else {
2302 uirq = &mdwc->wakeup_irq[HS_PHY_IRQ];
2303 configure_nonpdc_usb_interrupt(mdwc, uirq, true);
2304 uirq = &mdwc->wakeup_irq[SS_PHY_IRQ];
2305 configure_nonpdc_usb_interrupt(mdwc, uirq, true);
Mayank Rana511f3b22016-08-02 12:00:11 -07002306 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002307 mdwc->lpm_flags |= MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
2308 }
2309
2310 dev_info(mdwc->dev, "DWC3 in low power mode\n");
2311 return 0;
2312}
2313
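/*
 * Controller low power mode exit, mirroring dwc3_msm_suspend(): restore the
 * bus vote and clocks, wake the PHYs, re-enable the GDSC and redo the
 * power-on-reset sequence if the controller was power collapsed, disarm the
 * wakeup interrupts, and let dwc3_pwr_event_handler() service any power
 * events that arrived while in LPM.
 */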
2314static int dwc3_msm_resume(struct dwc3_msm *mdwc)
2315{
2316 int ret;
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08002317 long core_clk_rate;
Mayank Rana511f3b22016-08-02 12:00:11 -07002318 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Ranad339abe2017-05-31 09:19:49 -07002319 struct usb_irq *uirq;
Mayank Rana511f3b22016-08-02 12:00:11 -07002320
2321 dev_dbg(mdwc->dev, "%s: exiting lpm\n", __func__);
2322
2323 if (!atomic_read(&dwc->in_lpm)) {
2324 dev_dbg(mdwc->dev, "%s: Already resumed\n", __func__);
2325 return 0;
2326 }
2327
2328 pm_stay_awake(mdwc->dev);
2329
2330 /* Enable bus voting */
2331 if (mdwc->bus_perf_client) {
Mayank Ranaca9f3182017-04-13 17:44:14 -07002332 dbg_event(0xFF, "bus_vote_start", 1);
2333 ret = msm_bus_scale_client_update_request(
2334 mdwc->bus_perf_client, 1);
2335 dbg_event(0xFF, "bus_vote_finish", 1);
2336 if (ret)
2337 dev_err(mdwc->dev, "bus bw voting failed %d\n", ret);
Mayank Rana511f3b22016-08-02 12:00:11 -07002338 }
2339
2340 /* Vote for TCXO while waking up USB HSPHY */
2341 ret = clk_prepare_enable(mdwc->xo_clk);
2342 if (ret)
2343 dev_err(mdwc->dev, "%s failed to vote TCXO buffer%d\n",
2344 __func__, ret);
2345
2346 /* Restore controller power collapse */
2347 if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
2348 dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
2349 dwc3_msm_config_gdsc(mdwc, 1);
Amit Nischal4d278212016-06-06 17:54:34 +05302350 ret = reset_control_assert(mdwc->core_reset);
2351 if (ret)
2352 dev_err(mdwc->dev, "%s:core_reset assert failed\n",
2353 __func__);
Mayank Rana511f3b22016-08-02 12:00:11 -07002354 /* HW requires a short delay for reset to take place properly */
2355 usleep_range(1000, 1200);
Amit Nischal4d278212016-06-06 17:54:34 +05302356 ret = reset_control_deassert(mdwc->core_reset);
2357 if (ret)
2358 dev_err(mdwc->dev, "%s:core_reset deassert failed\n",
2359 __func__);
Mayank Rana511f3b22016-08-02 12:00:11 -07002360 clk_prepare_enable(mdwc->sleep_clk);
2361 }
2362
2363 /*
2364 * Enable clocks
2365 * Turn ON iface_clk before core_clk due to FSM dependency.
2366 */
2367 clk_prepare_enable(mdwc->iface_clk);
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05302368 if (mdwc->noc_aggr_clk)
2369 clk_prepare_enable(mdwc->noc_aggr_clk);
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08002370
2371 core_clk_rate = mdwc->core_clk_rate;
2372 if (mdwc->in_host_mode && mdwc->max_rh_port_speed == USB_SPEED_HIGH) {
2373 core_clk_rate = mdwc->core_clk_rate_hs;
2374 dev_dbg(mdwc->dev, "%s: set hs core clk rate %ld\n", __func__,
2375 core_clk_rate);
2376 }
2377
2378 clk_set_rate(mdwc->core_clk, core_clk_rate);
Mayank Rana511f3b22016-08-02 12:00:11 -07002379 clk_prepare_enable(mdwc->core_clk);
Hemant Kumar5fa38932016-10-27 11:58:37 -07002380
2381 /* set Memory core: ON, Memory periphery: ON */
2382 clk_set_flags(mdwc->core_clk, CLKFLAG_RETAIN_MEM);
2383 clk_set_flags(mdwc->core_clk, CLKFLAG_RETAIN_PERIPH);
2384
Mayank Rana511f3b22016-08-02 12:00:11 -07002385 clk_prepare_enable(mdwc->utmi_clk);
2386 if (mdwc->bus_aggr_clk)
2387 clk_prepare_enable(mdwc->bus_aggr_clk);
2388
2389 /* Resume SS PHY */
Hemant Kumarde1df692016-04-26 19:36:48 -07002390 if (dwc->maximum_speed == USB_SPEED_SUPER &&
2391 mdwc->lpm_flags & MDWC3_SS_PHY_SUSPEND) {
Mayank Rana511f3b22016-08-02 12:00:11 -07002392 mdwc->ss_phy->flags &= ~(PHY_LANE_A | PHY_LANE_B);
2393 if (mdwc->typec_orientation == ORIENTATION_CC1)
2394 mdwc->ss_phy->flags |= PHY_LANE_A;
2395 if (mdwc->typec_orientation == ORIENTATION_CC2)
2396 mdwc->ss_phy->flags |= PHY_LANE_B;
2397 usb_phy_set_suspend(mdwc->ss_phy, 0);
2398 mdwc->ss_phy->flags &= ~DEVICE_IN_SS_MODE;
2399 mdwc->lpm_flags &= ~MDWC3_SS_PHY_SUSPEND;
2400 }
2401
2402 mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
2403 /* Resume HS PHY */
2404 usb_phy_set_suspend(mdwc->hs_phy, 0);
2405
2406 /* Recover from controller power collapse */
2407 if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
2408 u32 tmp;
2409
Jack Pham9faa51df2017-04-03 18:13:40 -07002410 if (mdwc->iommu_map) {
2411 ret = arm_iommu_attach_device(mdwc->dev,
2412 mdwc->iommu_map);
2413 if (ret)
2414 dev_err(mdwc->dev, "IOMMU attach failed (%d)\n",
2415 ret);
2416 else
2417 dev_dbg(mdwc->dev, "attached to IOMMU\n");
2418 }
2419
Mayank Rana511f3b22016-08-02 12:00:11 -07002420 dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
2421
2422 dwc3_msm_power_collapse_por(mdwc);
2423
2424 /* Get initial P3 status and enable IN_P3 event */
2425 tmp = dwc3_msm_read_reg_field(mdwc->base,
2426 DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
2427 atomic_set(&mdwc->in_p3, tmp == DWC3_LINK_STATE_U3);
2428 dwc3_msm_write_reg_field(mdwc->base, PWR_EVNT_IRQ_MASK_REG,
2429 PWR_EVNT_POWERDOWN_IN_P3_MASK, 1);
2430
2431 mdwc->lpm_flags &= ~MDWC3_POWER_COLLAPSE;
2432 }
2433
2434 atomic_set(&dwc->in_lpm, 0);
2435
Vijayavardhan Vennapusa6a4c1d92016-12-08 13:06:26 +05302436 /* enable power evt irq for IN P3 detection */
Mayank Ranad339abe2017-05-31 09:19:49 -07002437 enable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
Vijayavardhan Vennapusa6a4c1d92016-12-08 13:06:26 +05302438
Mayank Rana511f3b22016-08-02 12:00:11 -07002439 /* Disable HSPHY auto suspend */
2440 dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
2441 dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0)) &
2442 ~(DWC3_GUSB2PHYCFG_ENBLSLPM |
2443 DWC3_GUSB2PHYCFG_SUSPHY));
2444
2445 /* Disable wakeup capable for HS_PHY IRQ & SS_PHY_IRQ if enabled */
2446 if (mdwc->lpm_flags & MDWC3_ASYNC_IRQ_WAKE_CAPABILITY) {
Mayank Ranad339abe2017-05-31 09:19:49 -07002447 if (mdwc->use_pdc_interrupts) {
2448 enable_usb_pdc_interrupt(mdwc, false);
2449 } else {
2450 uirq = &mdwc->wakeup_irq[HS_PHY_IRQ];
2451 configure_nonpdc_usb_interrupt(mdwc, uirq, false);
2452 uirq = &mdwc->wakeup_irq[SS_PHY_IRQ];
2453 configure_nonpdc_usb_interrupt(mdwc, uirq, false);
Mayank Rana511f3b22016-08-02 12:00:11 -07002454 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002455 mdwc->lpm_flags &= ~MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
2456 }
2457
2458 dev_info(mdwc->dev, "DWC3 exited from low power mode\n");
2459
Mayank Rana511f3b22016-08-02 12:00:11 -07002460 /* Enable core irq */
2461 if (dwc->irq)
2462 enable_irq(dwc->irq);
2463
2464 /*
2465 * Handle other power events that could not have been handled during
2466 * Low Power Mode
2467 */
2468 dwc3_pwr_event_handler(mdwc);
2469
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05302470 if (pm_qos_request_active(&mdwc->pm_qos_req_dma))
2471 schedule_delayed_work(&mdwc->perf_vote_work,
2472 msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
2473
Mayank Rana08e41922017-03-02 15:25:48 -08002474 dbg_event(0xFF, "Ctl Res", atomic_read(&dwc->in_lpm));
Mayank Rana511f3b22016-08-02 12:00:11 -07002475 return 0;
2476}
2477
2478/**
2479 * dwc3_ext_event_notify - callback to handle events from external transceiver
2480 *
2481 * Updates the OTG state machine inputs and schedules sm_work.
2482 */
2483static void dwc3_ext_event_notify(struct dwc3_msm *mdwc)
2484{
2485 /* Flush processing any pending events before handling new ones */
2486 flush_delayed_work(&mdwc->sm_work);
2487
2488 if (mdwc->id_state == DWC3_ID_FLOAT) {
2489 dev_dbg(mdwc->dev, "XCVR: ID set\n");
2490 set_bit(ID, &mdwc->inputs);
2491 } else {
2492 dev_dbg(mdwc->dev, "XCVR: ID clear\n");
2493 clear_bit(ID, &mdwc->inputs);
2494 }
2495
2496 if (mdwc->vbus_active && !mdwc->in_restart) {
2497 dev_dbg(mdwc->dev, "XCVR: BSV set\n");
2498 set_bit(B_SESS_VLD, &mdwc->inputs);
2499 } else {
2500 dev_dbg(mdwc->dev, "XCVR: BSV clear\n");
2501 clear_bit(B_SESS_VLD, &mdwc->inputs);
2502 }
2503
2504 if (mdwc->suspend) {
2505 dev_dbg(mdwc->dev, "XCVR: SUSP set\n");
2506 set_bit(B_SUSPEND, &mdwc->inputs);
2507 } else {
2508 dev_dbg(mdwc->dev, "XCVR: SUSP clear\n");
2509 clear_bit(B_SUSPEND, &mdwc->inputs);
2510 }
2511
2512 schedule_delayed_work(&mdwc->sm_work, 0);
2513}
2514
2515static void dwc3_resume_work(struct work_struct *w)
2516{
2517 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, resume_work);
Mayank Rana08e41922017-03-02 15:25:48 -08002518 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Jack Pham4e9dff72017-04-04 18:05:53 -07002519 union extcon_property_value val;
2520 unsigned int extcon_id;
2521 struct extcon_dev *edev = NULL;
2522 int ret = 0;
Mayank Rana511f3b22016-08-02 12:00:11 -07002523
2524 dev_dbg(mdwc->dev, "%s: dwc3 resume work\n", __func__);
2525
Jack Pham4e9dff72017-04-04 18:05:53 -07002526 if (mdwc->vbus_active) {
2527 edev = mdwc->extcon_vbus;
2528 extcon_id = EXTCON_USB;
2529 } else if (mdwc->id_state == DWC3_ID_GROUND) {
2530 edev = mdwc->extcon_id;
2531 extcon_id = EXTCON_USB_HOST;
2532 }
2533
2534 /* Check speed and Type-C polarity values in order to configure PHY */
2535 if (edev && extcon_get_state(edev, extcon_id)) {
2536 ret = extcon_get_property(edev, extcon_id,
2537 EXTCON_PROP_USB_SS, &val);
2538
2539 /* Use default dwc->maximum_speed if speed isn't reported */
2540 if (!ret)
2541 dwc->maximum_speed = (val.intval == 0) ?
2542 USB_SPEED_HIGH : USB_SPEED_SUPER;
2543
2544 if (dwc->maximum_speed > dwc->max_hw_supp_speed)
2545 dwc->maximum_speed = dwc->max_hw_supp_speed;
2546
Mayank Ranaf70d8212017-06-12 14:02:07 -07002547 if (override_usb_speed &&
2548 is_valid_usb_speed(dwc, override_usb_speed)) {
2549 dwc->maximum_speed = override_usb_speed;
2550 dbg_event(0xFF, "override_speed", override_usb_speed);
2551 }
2552
Jack Pham4e9dff72017-04-04 18:05:53 -07002553 dbg_event(0xFF, "speed", dwc->maximum_speed);
2554
2555 ret = extcon_get_property(edev, extcon_id,
2556 EXTCON_PROP_USB_TYPEC_POLARITY, &val);
2557 if (ret)
2558 mdwc->typec_orientation = ORIENTATION_NONE;
2559 else
2560 mdwc->typec_orientation = val.intval ?
2561 ORIENTATION_CC2 : ORIENTATION_CC1;
2562
2563 dbg_event(0xFF, "cc_state", mdwc->typec_orientation);
2564 }
2565
Mayank Rana511f3b22016-08-02 12:00:11 -07002566 /*
2567 * Exit LPM first to meet the resume timeline expected by the
2568 * device side. The resume_pending flag prevents calling
2569 * dwc3_msm_resume() when we get here due to a system-wide
2570 * resume without a USB cable connected; the flag is only set
2571 * by the power event IRQ while in LPM.
2572 */
2573 if (mdwc->resume_pending) {
2574 dwc3_msm_resume(mdwc);
2575 mdwc->resume_pending = false;
2576 }
2577
Mayank Rana08e41922017-03-02 15:25:48 -08002578 if (atomic_read(&mdwc->pm_suspended)) {
2579 dbg_event(0xFF, "RWrk PMSus", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07002580 /* let pm resume kick in resume work later */
2581 return;
Mayank Rana08e41922017-03-02 15:25:48 -08002582 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002583 dwc3_ext_event_notify(mdwc);
2584}
2585
2586static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc)
2587{
2588 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2589 u32 irq_stat, irq_clear = 0;
2590
2591 irq_stat = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
2592 dev_dbg(mdwc->dev, "%s irq_stat=%X\n", __func__, irq_stat);
2593
2594 /* Check for P3 events */
2595 if ((irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) &&
2596 (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK)) {
2597 /* Can't tell if entered or exit P3, so check LINKSTATE */
2598 u32 ls = dwc3_msm_read_reg_field(mdwc->base,
2599 DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
2600 dev_dbg(mdwc->dev, "%s link state = 0x%04x\n", __func__, ls);
2601 atomic_set(&mdwc->in_p3, ls == DWC3_LINK_STATE_U3);
2602
2603 irq_stat &= ~(PWR_EVNT_POWERDOWN_OUT_P3_MASK |
2604 PWR_EVNT_POWERDOWN_IN_P3_MASK);
2605 irq_clear |= (PWR_EVNT_POWERDOWN_OUT_P3_MASK |
2606 PWR_EVNT_POWERDOWN_IN_P3_MASK);
2607 } else if (irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) {
2608 atomic_set(&mdwc->in_p3, 0);
2609 irq_stat &= ~PWR_EVNT_POWERDOWN_OUT_P3_MASK;
2610 irq_clear |= PWR_EVNT_POWERDOWN_OUT_P3_MASK;
2611 } else if (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK) {
2612 atomic_set(&mdwc->in_p3, 1);
2613 irq_stat &= ~PWR_EVNT_POWERDOWN_IN_P3_MASK;
2614 irq_clear |= PWR_EVNT_POWERDOWN_IN_P3_MASK;
2615 }
2616
2617 /* Clear L2 exit */
2618 if (irq_stat & PWR_EVNT_LPM_OUT_L2_MASK) {
2619 irq_stat &= ~PWR_EVNT_LPM_OUT_L2_MASK;
2620 irq_clear |= PWR_EVNT_LPM_OUT_L2_MASK;
2621 }
2622
2623 /* Handle exit from L1 events */
2624 if (irq_stat & PWR_EVNT_LPM_OUT_L1_MASK) {
2625 dev_dbg(mdwc->dev, "%s: handling PWR_EVNT_LPM_OUT_L1_MASK\n",
2626 __func__);
2627 if (usb_gadget_wakeup(&dwc->gadget))
2628 dev_err(mdwc->dev, "%s failed to take dwc out of L1\n",
2629 __func__);
2630 irq_stat &= ~PWR_EVNT_LPM_OUT_L1_MASK;
2631 irq_clear |= PWR_EVNT_LPM_OUT_L1_MASK;
2632 }
2633
2634 /* Unhandled events */
2635 if (irq_stat)
2636 dev_dbg(mdwc->dev, "%s: unexpected PWR_EVNT, irq_stat=%X\n",
2637 __func__, irq_stat);
2638
2639 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG, irq_clear);
2640}
2641
2642static irqreturn_t msm_dwc3_pwr_irq_thread(int irq, void *_mdwc)
2643{
2644 struct dwc3_msm *mdwc = _mdwc;
2645 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2646
2647 dev_dbg(mdwc->dev, "%s\n", __func__);
2648
2649 if (atomic_read(&dwc->in_lpm))
2650 dwc3_resume_work(&mdwc->resume_work);
2651 else
2652 dwc3_pwr_event_handler(mdwc);
2653
Mayank Rana08e41922017-03-02 15:25:48 -08002654 dbg_event(0xFF, "PWR IRQ", atomic_read(&dwc->in_lpm));
Mayank Rana511f3b22016-08-02 12:00:11 -07002655 return IRQ_HANDLED;
2656}
2657
2658static irqreturn_t msm_dwc3_pwr_irq(int irq, void *data)
2659{
2660 struct dwc3_msm *mdwc = data;
2661 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2662
2663 dwc->t_pwr_evt_irq = ktime_get();
2664 dev_dbg(mdwc->dev, "%s received\n", __func__);
2665 /*
2666 * When in Low Power Mode, can't read PWR_EVNT_IRQ_STAT_REG to ascertain
2667 * which interrupts have been triggered, as the clocks are disabled.
2668 * Resume controller by waking up pwr event irq thread. After re-enabling
2669 * clocks, dwc3_msm_resume will call dwc3_pwr_event_handler to handle
2670 * all other power events.
2671 */
2672 if (atomic_read(&dwc->in_lpm)) {
2673 /* set this to call dwc3_msm_resume() */
2674 mdwc->resume_pending = true;
2675 return IRQ_WAKE_THREAD;
2676 }
2677
2678 dwc3_pwr_event_handler(mdwc);
2679 return IRQ_HANDLED;
2680}
2681
2682static int dwc3_cpu_notifier_cb(struct notifier_block *nfb,
2683 unsigned long action, void *hcpu)
2684{
2685 uint32_t cpu = (uintptr_t)hcpu;
2686 struct dwc3_msm *mdwc =
2687 container_of(nfb, struct dwc3_msm, dwc3_cpu_notifier);
2688
2689 if (cpu == cpu_to_affin && action == CPU_ONLINE) {
2690 pr_debug("%s: cpu online:%u irq:%d\n", __func__,
2691 cpu_to_affin, mdwc->irq_to_affin);
2692 irq_set_affinity(mdwc->irq_to_affin, get_cpu_mask(cpu));
2693 }
2694
2695 return NOTIFY_OK;
2696}
2697
2698static void dwc3_otg_sm_work(struct work_struct *w);
2699
2700static int dwc3_msm_get_clk_gdsc(struct dwc3_msm *mdwc)
2701{
2702 int ret;
2703
2704 mdwc->dwc3_gdsc = devm_regulator_get(mdwc->dev, "USB3_GDSC");
2705 if (IS_ERR(mdwc->dwc3_gdsc))
2706 mdwc->dwc3_gdsc = NULL;
2707
2708 mdwc->xo_clk = devm_clk_get(mdwc->dev, "xo");
2709 if (IS_ERR(mdwc->xo_clk)) {
2710 dev_err(mdwc->dev, "%s unable to get TCXO buffer handle\n",
2711 __func__);
2712 ret = PTR_ERR(mdwc->xo_clk);
2713 return ret;
2714 }
2715 clk_set_rate(mdwc->xo_clk, 19200000);
2716
2717 mdwc->iface_clk = devm_clk_get(mdwc->dev, "iface_clk");
2718 if (IS_ERR(mdwc->iface_clk)) {
2719 dev_err(mdwc->dev, "failed to get iface_clk\n");
2720 ret = PTR_ERR(mdwc->iface_clk);
2721 return ret;
2722 }
2723
2724 /*
2725 * DWC3 Core requires its CORE CLK (aka master / bus clk) to
2726 * run at 125 MHz in SSUSB mode and >60 MHz in HSUSB mode.
2727 * On newer platforms it can run at 150 MHz as well.
2728 */
2729 mdwc->core_clk = devm_clk_get(mdwc->dev, "core_clk");
2730 if (IS_ERR(mdwc->core_clk)) {
2731 dev_err(mdwc->dev, "failed to get core_clk\n");
2732 ret = PTR_ERR(mdwc->core_clk);
2733 return ret;
2734 }
2735
Amit Nischal4d278212016-06-06 17:54:34 +05302736 mdwc->core_reset = devm_reset_control_get(mdwc->dev, "core_reset");
2737 if (IS_ERR(mdwc->core_reset)) {
2738 dev_err(mdwc->dev, "failed to get core_reset\n");
2739 return PTR_ERR(mdwc->core_reset);
2740 }
2741
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05302742 if (of_property_read_u32(mdwc->dev->of_node, "qcom,core-clk-rate",
Vijayavardhan Vennapusa3e668f32016-01-08 15:58:35 +05302743 (u32 *)&mdwc->core_clk_rate)) {
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05302744 dev_err(mdwc->dev, "USB core-clk-rate is not present\n");
2745 return -EINVAL;
Vijayavardhan Vennapusa3e668f32016-01-08 15:58:35 +05302746 }
2747
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05302748 mdwc->core_clk_rate = clk_round_rate(mdwc->core_clk,
Vijayavardhan Vennapusa3e668f32016-01-08 15:58:35 +05302749 mdwc->core_clk_rate);
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05302750 dev_dbg(mdwc->dev, "USB core frequency = %ld\n",
2751 mdwc->core_clk_rate);
2752 ret = clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
2753 if (ret)
2754 dev_err(mdwc->dev, "fail to set core_clk freq:%d\n", ret);
Mayank Rana511f3b22016-08-02 12:00:11 -07002755
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08002756 if (of_property_read_u32(mdwc->dev->of_node, "qcom,core-clk-rate-hs",
2757 (u32 *)&mdwc->core_clk_rate_hs)) {
2758 dev_dbg(mdwc->dev, "USB core-clk-rate-hs is not present\n");
2759 mdwc->core_clk_rate_hs = mdwc->core_clk_rate;
2760 }
2761
Mayank Rana511f3b22016-08-02 12:00:11 -07002762 mdwc->sleep_clk = devm_clk_get(mdwc->dev, "sleep_clk");
2763 if (IS_ERR(mdwc->sleep_clk)) {
2764 dev_err(mdwc->dev, "failed to get sleep_clk\n");
2765 ret = PTR_ERR(mdwc->sleep_clk);
2766 return ret;
2767 }
2768
2769 clk_set_rate(mdwc->sleep_clk, 32000);
2770 mdwc->utmi_clk_rate = 19200000;
2771 mdwc->utmi_clk = devm_clk_get(mdwc->dev, "utmi_clk");
2772 if (IS_ERR(mdwc->utmi_clk)) {
2773 dev_err(mdwc->dev, "failed to get utmi_clk\n");
2774 ret = PTR_ERR(mdwc->utmi_clk);
2775 return ret;
2776 }
2777
2778 clk_set_rate(mdwc->utmi_clk, mdwc->utmi_clk_rate);
2779 mdwc->bus_aggr_clk = devm_clk_get(mdwc->dev, "bus_aggr_clk");
2780 if (IS_ERR(mdwc->bus_aggr_clk))
2781 mdwc->bus_aggr_clk = NULL;
2782
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05302783 mdwc->noc_aggr_clk = devm_clk_get(mdwc->dev, "noc_aggr_clk");
2784 if (IS_ERR(mdwc->noc_aggr_clk))
2785 mdwc->noc_aggr_clk = NULL;
2786
Mayank Rana511f3b22016-08-02 12:00:11 -07002787 if (of_property_match_string(mdwc->dev->of_node,
2788 "clock-names", "cfg_ahb_clk") >= 0) {
2789 mdwc->cfg_ahb_clk = devm_clk_get(mdwc->dev, "cfg_ahb_clk");
2790 if (IS_ERR(mdwc->cfg_ahb_clk)) {
2791 ret = PTR_ERR(mdwc->cfg_ahb_clk);
2792 mdwc->cfg_ahb_clk = NULL;
2793 if (ret != -EPROBE_DEFER)
2794 dev_err(mdwc->dev,
2795 "failed to get cfg_ahb_clk ret %d\n",
2796 ret);
2797 return ret;
2798 }
2799 }
2800
2801 return 0;
2802}
2803
2804static int dwc3_msm_id_notifier(struct notifier_block *nb,
2805 unsigned long event, void *ptr)
2806{
2807 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, id_nb);
Hemant Kumarde1df692016-04-26 19:36:48 -07002808 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07002809 enum dwc3_id_state id;
Mayank Rana511f3b22016-08-02 12:00:11 -07002810
2811 id = event ? DWC3_ID_GROUND : DWC3_ID_FLOAT;
2812
2813 dev_dbg(mdwc->dev, "host:%ld (id:%d) event received\n", event, id);
2814
Mayank Rana511f3b22016-08-02 12:00:11 -07002815 if (mdwc->id_state != id) {
2816 mdwc->id_state = id;
Mayank Rana08e41922017-03-02 15:25:48 -08002817 dbg_event(0xFF, "id_state", mdwc->id_state);
Mayank Rana511f3b22016-08-02 12:00:11 -07002818 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
2819 }
2820
Mayank Rana511f3b22016-08-02 12:00:11 -07002821 return NOTIFY_DONE;
2822}
2823
Hemant Kumar006fae42017-07-12 18:11:25 -07002824
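/*
 * Delayed SDP check (sdp_check), presumably scheduled elsewhere in the
 * driver once an SDP connection is reported: if the gadget has not moved
 * past the DEFAULT state and the link is not in compliance mode, D+/D- are
 * treated as floating and the session is dropped by clearing vbus_active
 * and re-queueing resume_work.
 */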
2825static void check_for_sdp_connection(struct work_struct *w)
2826{
Hemant Kumar006fae42017-07-12 18:11:25 -07002827 struct dwc3_msm *mdwc =
2828 container_of(w, struct dwc3_msm, sdp_check.work);
2829 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2830
2831 if (!mdwc->vbus_active)
2832 return;
2833
2834 /* floating D+/D- lines detected */
2835 if (dwc->gadget.state < USB_STATE_DEFAULT &&
2836 dwc3_gadget_get_link_state(dwc) != DWC3_LINK_STATE_CMPLY) {
Hemant Kumar006fae42017-07-12 18:11:25 -07002837 mdwc->vbus_active = 0;
2838 dbg_event(0xFF, "Q RW SPD CHK", mdwc->vbus_active);
2839 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
2840 }
2841}
2842
Mayank Rana511f3b22016-08-02 12:00:11 -07002843static int dwc3_msm_vbus_notifier(struct notifier_block *nb,
2844 unsigned long event, void *ptr)
2845{
2846 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, vbus_nb);
2847 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07002848
2849 dev_dbg(mdwc->dev, "vbus:%ld event received\n", event);
2850
2851 if (mdwc->vbus_active == event)
2852 return NOTIFY_DONE;
2853
Mayank Rana511f3b22016-08-02 12:00:11 -07002854 mdwc->vbus_active = event;
Mayank Rana83ad5822016-08-09 14:17:22 -07002855 if (dwc->is_drd && !mdwc->in_restart)
Mayank Rana511f3b22016-08-02 12:00:11 -07002856 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
Jack Pham4e9dff72017-04-04 18:05:53 -07002857
Mayank Rana511f3b22016-08-02 12:00:11 -07002858 return NOTIFY_DONE;
2859}
Jack Pham4e9dff72017-04-04 18:05:53 -07002860
Mayank Rana51958172017-02-28 14:49:21 -08002861/*
2862 * Handle EUD based soft detach/attach event, and force USB high speed mode
2863 * functionality on receiving soft attach event.
2864 *
2865 * @nb - notifier handler
2866 * @event - event information i.e. soft detach/attach event
2867 * @ptr - extcon_dev pointer
2868 *
2869 * @return int - always NOTIFY_DONE for EUD events
2870 */
2871static int dwc3_msm_eud_notifier(struct notifier_block *nb,
2872 unsigned long event, void *ptr)
2873{
2874 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, eud_event_nb);
2875 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana51958172017-02-28 14:49:21 -08002876
2877 dbg_event(0xFF, "EUD_NB", event);
2878 dev_dbg(mdwc->dev, "eud:%ld event received\n", event);
2879 if (mdwc->vbus_active == event)
2880 return NOTIFY_DONE;
2881
2882 /* Force USB High-Speed enumeration Only */
2883 dwc->maximum_speed = USB_SPEED_HIGH;
2884 dbg_event(0xFF, "Speed", dwc->maximum_speed);
2885 mdwc->vbus_active = event;
2886 if (dwc->is_drd && !mdwc->in_restart)
2887 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
Jack Pham4e9dff72017-04-04 18:05:53 -07002888
Mayank Rana51958172017-02-28 14:49:21 -08002889 return NOTIFY_DONE;
2890}
Mayank Rana511f3b22016-08-02 12:00:11 -07002891
2892static int dwc3_msm_extcon_register(struct dwc3_msm *mdwc)
2893{
2894 struct device_node *node = mdwc->dev->of_node;
2895 struct extcon_dev *edev;
2896 int ret = 0;
2897
2898 if (!of_property_read_bool(node, "extcon"))
2899 return 0;
2900
Mayank Rana51958172017-02-28 14:49:21 -08002901 /* Use first phandle (mandatory) for USB vbus status notification */
Mayank Rana511f3b22016-08-02 12:00:11 -07002902 edev = extcon_get_edev_by_phandle(mdwc->dev, 0);
2903 if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV)
2904 return PTR_ERR(edev);
2905
2906 if (!IS_ERR(edev)) {
2907 mdwc->extcon_vbus = edev;
2908 mdwc->vbus_nb.notifier_call = dwc3_msm_vbus_notifier;
2909 ret = extcon_register_notifier(edev, EXTCON_USB,
2910 &mdwc->vbus_nb);
2911 if (ret < 0) {
2912 dev_err(mdwc->dev, "failed to register notifier for USB\n");
2913 return ret;
2914 }
2915 }
2916
Mayank Rana51958172017-02-28 14:49:21 -08002917 /* Use second phandle (optional) for USB ID status notification */
Mayank Rana511f3b22016-08-02 12:00:11 -07002918 if (of_count_phandle_with_args(node, "extcon", NULL) > 1) {
2919 edev = extcon_get_edev_by_phandle(mdwc->dev, 1);
2920 if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) {
2921 ret = PTR_ERR(edev);
2922 goto err;
2923 }
2924 }
2925
2926 if (!IS_ERR(edev)) {
2927 mdwc->extcon_id = edev;
2928 mdwc->id_nb.notifier_call = dwc3_msm_id_notifier;
2929 ret = extcon_register_notifier(edev, EXTCON_USB_HOST,
2930 &mdwc->id_nb);
2931 if (ret < 0) {
2932 dev_err(mdwc->dev, "failed to register notifier for USB-HOST\n");
2933 goto err;
2934 }
2935 }
2936
Mayank Rana81bd2e52017-07-26 16:15:15 -07002937 edev = NULL;
Mayank Rana51958172017-02-28 14:49:21 -08002938 /* Use third phandle (optional) for EUD based detach/attach events */
2939 if (of_count_phandle_with_args(node, "extcon", NULL) > 2) {
2940 edev = extcon_get_edev_by_phandle(mdwc->dev, 2);
2941 if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) {
2942 ret = PTR_ERR(edev);
2943 goto err;
2944 }
2945 }
2946
Mayank Rana81bd2e52017-07-26 16:15:15 -07002947 if (!IS_ERR_OR_NULL(edev)) {
Mayank Rana51958172017-02-28 14:49:21 -08002948 mdwc->extcon_eud = edev;
2949 mdwc->eud_event_nb.notifier_call = dwc3_msm_eud_notifier;
2950 ret = extcon_register_notifier(edev, EXTCON_USB,
2951 &mdwc->eud_event_nb);
2952 if (ret < 0) {
2953 dev_err(mdwc->dev, "failed to register notifier for EUD-USB\n");
2954 goto err1;
2955 }
2956 }
2957
Mayank Rana511f3b22016-08-02 12:00:11 -07002958 return 0;
Mayank Rana51958172017-02-28 14:49:21 -08002959err1:
2960 if (mdwc->extcon_id)
2961 extcon_unregister_notifier(mdwc->extcon_id, EXTCON_USB_HOST,
2962 &mdwc->id_nb);
Mayank Rana511f3b22016-08-02 12:00:11 -07002963err:
2964 if (mdwc->extcon_vbus)
2965 extcon_unregister_notifier(mdwc->extcon_vbus, EXTCON_USB,
2966 &mdwc->vbus_nb);
2967 return ret;
2968}
2969
Jack Phambbe27962017-03-23 18:42:26 -07002970#define SMMU_BASE 0x10000000 /* Device address range base */
2971#define SMMU_SIZE 0x40000000 /* Device address range size */
2972
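/*
 * Create a 1 GB ARM IOMMU mapping for the controller (SMMU_BASE/SMMU_SIZE),
 * mark the domain atomic, optionally enable stage-1 bypass when the
 * "qcom,smmu-s1-bypass" DT property is present, and attach the device.
 */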
2973static int dwc3_msm_init_iommu(struct dwc3_msm *mdwc)
2974{
2975 struct device_node *node = mdwc->dev->of_node;
Jack Pham283cece2017-04-05 09:58:17 -07002976 int atomic_ctx = 1, s1_bypass;
Jack Phambbe27962017-03-23 18:42:26 -07002977 int ret;
2978
2979 if (!of_property_read_bool(node, "iommus"))
2980 return 0;
2981
2982 mdwc->iommu_map = arm_iommu_create_mapping(&platform_bus_type,
2983 SMMU_BASE, SMMU_SIZE);
2984 if (IS_ERR_OR_NULL(mdwc->iommu_map)) {
2985 ret = PTR_ERR(mdwc->iommu_map) ?: -ENODEV;
2986 dev_err(mdwc->dev, "Failed to create IOMMU mapping (%d)\n",
2987 ret);
2988 return ret;
2989 }
2990 dev_dbg(mdwc->dev, "IOMMU mapping created: %pK\n", mdwc->iommu_map);
2991
2992 ret = iommu_domain_set_attr(mdwc->iommu_map->domain, DOMAIN_ATTR_ATOMIC,
2993 &atomic_ctx);
2994 if (ret) {
2995 dev_err(mdwc->dev, "IOMMU set atomic attribute failed (%d)\n",
2996 ret);
Jack Pham9faa51df2017-04-03 18:13:40 -07002997 goto release_mapping;
Jack Phambbe27962017-03-23 18:42:26 -07002998 }
2999
Jack Pham283cece2017-04-05 09:58:17 -07003000 s1_bypass = of_property_read_bool(node, "qcom,smmu-s1-bypass");
3001 ret = iommu_domain_set_attr(mdwc->iommu_map->domain,
3002 DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
3003 if (ret) {
3004 dev_err(mdwc->dev, "IOMMU set s1 bypass (%d) failed (%d)\n",
3005 s1_bypass, ret);
3006 goto release_mapping;
3007 }
3008
Jack Pham9faa51df2017-04-03 18:13:40 -07003009 ret = arm_iommu_attach_device(mdwc->dev, mdwc->iommu_map);
3010 if (ret) {
3011 dev_err(mdwc->dev, "IOMMU attach failed (%d)\n", ret);
3012 goto release_mapping;
3013 }
3014 dev_dbg(mdwc->dev, "attached to IOMMU\n");
3015
Jack Phambbe27962017-03-23 18:42:26 -07003016 return 0;
Jack Pham9faa51df2017-04-03 18:13:40 -07003017
3018release_mapping:
3019 arm_iommu_release_mapping(mdwc->iommu_map);
3020 mdwc->iommu_map = NULL;
3021 return ret;
Jack Phambbe27962017-03-23 18:42:26 -07003022}
3023
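/*
 * "mode" sysfs attribute: reads back the current role and allows forcing one
 * from userspace for testing, e.g. (the exact sysfs path depends on the
 * platform device name and is only an example):
 *
 *   echo peripheral > /sys/devices/platform/.../mode
 *   echo host       > /sys/devices/platform/.../mode
 *   echo none       > /sys/devices/platform/.../mode
 *
 * Writing "peripheral" simulates VBUS, "host" simulates ID ground, and any
 * other string clears both before kicking the OTG state machine.
 */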
Mayank Rana511f3b22016-08-02 12:00:11 -07003024static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
3025 char *buf)
3026{
3027 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3028
3029 if (mdwc->vbus_active)
3030 return snprintf(buf, PAGE_SIZE, "peripheral\n");
3031 if (mdwc->id_state == DWC3_ID_GROUND)
3032 return snprintf(buf, PAGE_SIZE, "host\n");
3033
3034 return snprintf(buf, PAGE_SIZE, "none\n");
3035}
3036
3037static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
3038 const char *buf, size_t count)
3039{
3040 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3041
3042 if (sysfs_streq(buf, "peripheral")) {
3043 mdwc->vbus_active = true;
3044 mdwc->id_state = DWC3_ID_FLOAT;
3045 } else if (sysfs_streq(buf, "host")) {
3046 mdwc->vbus_active = false;
3047 mdwc->id_state = DWC3_ID_GROUND;
3048 } else {
3049 mdwc->vbus_active = false;
3050 mdwc->id_state = DWC3_ID_FLOAT;
3051 }
3052
3053 dwc3_ext_event_notify(mdwc);
3054
3055 return count;
3056}
3057
3058static DEVICE_ATTR_RW(mode);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303059static void msm_dwc3_perf_vote_work(struct work_struct *w);
Mayank Rana511f3b22016-08-02 12:00:11 -07003060
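/*
 * "speed" sysfs attribute: caps the maximum hardware-supported speed and
 * restarts the USB session so the new limit takes effect, e.g. (path again
 * illustrative only):
 *
 *   echo high > /sys/devices/platform/.../speed
 *
 * Only "high" and "super" are accepted; anything else is ignored.
 */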
Vamsi Krishna Samavedam17f26db2017-01-31 17:21:23 -08003061static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
3062 char *buf)
3063{
3064 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3065 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3066
3067 return snprintf(buf, PAGE_SIZE, "%s\n",
3068 usb_speed_string(dwc->max_hw_supp_speed));
3069}
3070
3071static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
3072 const char *buf, size_t count)
3073{
3074 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3075 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3076 enum usb_device_speed req_speed = USB_SPEED_UNKNOWN;
3077
3078 if (sysfs_streq(buf, "high"))
3079 req_speed = USB_SPEED_HIGH;
3080 else if (sysfs_streq(buf, "super"))
3081 req_speed = USB_SPEED_SUPER;
3082
3083 if (req_speed != USB_SPEED_UNKNOWN &&
3084 req_speed != dwc->max_hw_supp_speed) {
3085 dwc->maximum_speed = dwc->max_hw_supp_speed = req_speed;
3086 schedule_work(&mdwc->restart_usb_work);
3087 }
3088
3089 return count;
3090}
3091static DEVICE_ATTR_RW(speed);
3092
Mayank Rana511f3b22016-08-02 12:00:11 -07003093static int dwc3_msm_probe(struct platform_device *pdev)
3094{
3095 struct device_node *node = pdev->dev.of_node, *dwc3_node;
3096 struct device *dev = &pdev->dev;
Hemant Kumar8220a982017-01-19 18:11:34 -08003097 union power_supply_propval pval = {0};
Mayank Rana511f3b22016-08-02 12:00:11 -07003098 struct dwc3_msm *mdwc;
3099 struct dwc3 *dwc;
3100 struct resource *res;
3101 void __iomem *tcsr;
3102 bool host_mode;
Mayank Ranad339abe2017-05-31 09:19:49 -07003103 int ret = 0, i;
Mayank Rana511f3b22016-08-02 12:00:11 -07003104 int ext_hub_reset_gpio;
3105 u32 val;
Mayank Ranad339abe2017-05-31 09:19:49 -07003106 unsigned long irq_type;
Mayank Rana511f3b22016-08-02 12:00:11 -07003107
3108 mdwc = devm_kzalloc(&pdev->dev, sizeof(*mdwc), GFP_KERNEL);
3109 if (!mdwc)
3110 return -ENOMEM;
3111
3112 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
3113 dev_err(&pdev->dev, "setting DMA mask to 64 failed.\n");
3114 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
3115 dev_err(&pdev->dev, "setting DMA mask to 32 failed.\n");
3116 return -EOPNOTSUPP;
3117 }
3118 }
3119
3120 platform_set_drvdata(pdev, mdwc);
3121 mdwc->dev = &pdev->dev;
3122
3123 INIT_LIST_HEAD(&mdwc->req_complete_list);
3124 INIT_WORK(&mdwc->resume_work, dwc3_resume_work);
3125 INIT_WORK(&mdwc->restart_usb_work, dwc3_restart_usb_work);
Jack Pham4b8b4ae2016-08-09 11:36:34 -07003126 INIT_WORK(&mdwc->vbus_draw_work, dwc3_msm_vbus_draw_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07003127 INIT_DELAYED_WORK(&mdwc->sm_work, dwc3_otg_sm_work);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303128 INIT_DELAYED_WORK(&mdwc->perf_vote_work, msm_dwc3_perf_vote_work);
Hemant Kumar006fae42017-07-12 18:11:25 -07003129 INIT_DELAYED_WORK(&mdwc->sdp_check, check_for_sdp_connection);
Mayank Rana511f3b22016-08-02 12:00:11 -07003130
3131 mdwc->dwc3_wq = alloc_ordered_workqueue("dwc3_wq", 0);
3132 if (!mdwc->dwc3_wq) {
3133 pr_err("%s: Unable to create workqueue dwc3_wq\n", __func__);
3134 return -ENOMEM;
3135 }
3136
3137 /* Get all clks and gdsc reference */
3138 ret = dwc3_msm_get_clk_gdsc(mdwc);
3139 if (ret) {
3140 dev_err(&pdev->dev, "error getting clock or gdsc.\n");
3141 return ret;
3142 }
3143
3144 mdwc->id_state = DWC3_ID_FLOAT;
3145 set_bit(ID, &mdwc->inputs);
3146
3147 mdwc->charging_disabled = of_property_read_bool(node,
3148 "qcom,charging-disabled");
3149
3150 ret = of_property_read_u32(node, "qcom,lpm-to-suspend-delay-ms",
3151 &mdwc->lpm_to_suspend_delay);
3152 if (ret) {
3153 dev_dbg(&pdev->dev, "setting lpm_to_suspend_delay to zero.\n");
3154 mdwc->lpm_to_suspend_delay = 0;
3155 }
3156
Mayank Ranad339abe2017-05-31 09:19:49 -07003157 memcpy(mdwc->wakeup_irq, usb_irq_info, sizeof(usb_irq_info));
3158 for (i = 0; i < USB_MAX_IRQ; i++) {
3159 irq_type = IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME |
3160 IRQF_ONESHOT;
3161 mdwc->wakeup_irq[i].irq = platform_get_irq_byname(pdev,
3162 mdwc->wakeup_irq[i].name);
3163 if (mdwc->wakeup_irq[i].irq < 0) {
3164			/* pwr_event_irq is the only mandatory IRQ */
3165 if (!strcmp(mdwc->wakeup_irq[i].name,
3166 "pwr_event_irq")) {
3167				dev_err(&pdev->dev, "get_irq for %s failed\n",
3168 mdwc->wakeup_irq[i].name);
3169 ret = -EINVAL;
3170 goto err;
3171 }
3172 mdwc->wakeup_irq[i].irq = 0;
3173 } else {
3174 irq_set_status_flags(mdwc->wakeup_irq[i].irq,
3175 IRQ_NOAUTOEN);
3176			/* ss_phy_irq is a level-triggered interrupt */
3177 if (!strcmp(mdwc->wakeup_irq[i].name, "ss_phy_irq"))
3178 irq_type = IRQF_TRIGGER_HIGH | IRQF_ONESHOT |
3179 IRQ_TYPE_LEVEL_HIGH | IRQF_EARLY_RESUME;
Mayank Rana511f3b22016-08-02 12:00:11 -07003180
Mayank Ranad339abe2017-05-31 09:19:49 -07003181 ret = devm_request_threaded_irq(&pdev->dev,
3182 mdwc->wakeup_irq[i].irq,
Mayank Rana511f3b22016-08-02 12:00:11 -07003183 msm_dwc3_pwr_irq,
3184 msm_dwc3_pwr_irq_thread,
Mayank Ranad339abe2017-05-31 09:19:49 -07003185 irq_type,
3186 mdwc->wakeup_irq[i].name, mdwc);
3187 if (ret) {
3188				dev_err(&pdev->dev, "irq req %s failed: %d\n",
3189 mdwc->wakeup_irq[i].name, ret);
3190 goto err;
3191 }
Mayank Rana511f3b22016-08-02 12:00:11 -07003192 }
3193 }
3194
3195 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcsr_base");
3196 if (!res) {
3197 dev_dbg(&pdev->dev, "missing TCSR memory resource\n");
3198 } else {
3199 tcsr = devm_ioremap_nocache(&pdev->dev, res->start,
3200 resource_size(res));
3201 if (IS_ERR_OR_NULL(tcsr)) {
3202 dev_dbg(&pdev->dev, "tcsr ioremap failed\n");
3203 } else {
3204 /* Enable USB3 on the primary USB port. */
3205 writel_relaxed(0x1, tcsr);
3206 /*
3207 * Ensure that TCSR write is completed before
3208 * USB registers initialization.
3209 */
3210 mb();
3211 }
3212 }
3213
3214 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core_base");
3215 if (!res) {
3216 dev_err(&pdev->dev, "missing memory base resource\n");
3217 ret = -ENODEV;
3218 goto err;
3219 }
3220
3221 mdwc->base = devm_ioremap_nocache(&pdev->dev, res->start,
3222 resource_size(res));
3223 if (!mdwc->base) {
3224 dev_err(&pdev->dev, "ioremap failed\n");
3225 ret = -ENODEV;
3226 goto err;
3227 }
3228
3229 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3230 "ahb2phy_base");
3231 if (res) {
3232 mdwc->ahb2phy_base = devm_ioremap_nocache(&pdev->dev,
3233 res->start, resource_size(res));
3234 if (IS_ERR_OR_NULL(mdwc->ahb2phy_base)) {
3235			dev_err(dev, "couldn't map ahb2phy_base addr.\n");
3236 mdwc->ahb2phy_base = NULL;
3237 } else {
3238 /*
3239			 * On some targets cfg_ahb_clk depends on the USB GDSC
3240			 * regulator. If cfg_ahb_clk is enabled without turning
3241			 * on the USB GDSC regulator, the clock is stuck off.
3242 */
3243 dwc3_msm_config_gdsc(mdwc, 1);
3244 clk_prepare_enable(mdwc->cfg_ahb_clk);
3245			/* Configure AHB2PHY for one wait-state read/write */
3246 val = readl_relaxed(mdwc->ahb2phy_base +
3247 PERIPH_SS_AHB2PHY_TOP_CFG);
3248 if (val != ONE_READ_WRITE_WAIT) {
3249 writel_relaxed(ONE_READ_WRITE_WAIT,
3250 mdwc->ahb2phy_base +
3251 PERIPH_SS_AHB2PHY_TOP_CFG);
3252 /* complete above write before using USB PHY */
3253 mb();
3254 }
3255 clk_disable_unprepare(mdwc->cfg_ahb_clk);
3256 dwc3_msm_config_gdsc(mdwc, 0);
3257 }
3258 }
3259
3260 if (of_get_property(pdev->dev.of_node, "qcom,usb-dbm", NULL)) {
3261 mdwc->dbm = usb_get_dbm_by_phandle(&pdev->dev, "qcom,usb-dbm");
3262 if (IS_ERR(mdwc->dbm)) {
3263 dev_err(&pdev->dev, "unable to get dbm device\n");
3264 ret = -EPROBE_DEFER;
3265 goto err;
3266 }
3267 /*
3268 * Add power event if the dbm indicates coming out of L1
3269 * by interrupt
3270 */
3271 if (dbm_l1_lpm_interrupt(mdwc->dbm)) {
Mayank Ranad339abe2017-05-31 09:19:49 -07003272 if (!mdwc->wakeup_irq[PWR_EVNT_IRQ].irq) {
Mayank Rana511f3b22016-08-02 12:00:11 -07003273 dev_err(&pdev->dev,
3274 "need pwr_event_irq exiting L1\n");
3275 ret = -EINVAL;
3276 goto err;
3277 }
3278 }
3279 }
3280
3281 ext_hub_reset_gpio = of_get_named_gpio(node,
3282 "qcom,ext-hub-reset-gpio", 0);
3283
3284 if (gpio_is_valid(ext_hub_reset_gpio)
3285 && (!devm_gpio_request(&pdev->dev, ext_hub_reset_gpio,
3286 "qcom,ext-hub-reset-gpio"))) {
3287 /* reset external hub */
3288 gpio_direction_output(ext_hub_reset_gpio, 1);
3289 /*
3290		 * Hub reset should be asserted for a minimum of
3291		 * 5 microseconds before deasserting.
3292 */
3293 usleep_range(5, 1000);
3294 gpio_direction_output(ext_hub_reset_gpio, 0);
3295 }
3296
3297 if (of_property_read_u32(node, "qcom,dwc-usb3-msm-tx-fifo-size",
3298 &mdwc->tx_fifo_size))
3299 dev_err(&pdev->dev,
3300 "unable to read platform data tx fifo size\n");
3301
3302 mdwc->disable_host_mode_pm = of_property_read_bool(node,
3303 "qcom,disable-host-mode-pm");
Mayank Ranad339abe2017-05-31 09:19:49 -07003304 mdwc->use_pdc_interrupts = of_property_read_bool(node,
3305 "qcom,use-pdc-interrupts");
Mayank Rana511f3b22016-08-02 12:00:11 -07003306 dwc3_set_notifier(&dwc3_msm_notify_event);
3307
Jack Phambbe27962017-03-23 18:42:26 -07003308 ret = dwc3_msm_init_iommu(mdwc);
3309 if (ret)
3310 goto err;
3311
Mayank Rana511f3b22016-08-02 12:00:11 -07003312 /* Assumes dwc3 is the first DT child of dwc3-msm */
3313 dwc3_node = of_get_next_available_child(node, NULL);
3314 if (!dwc3_node) {
3315 dev_err(&pdev->dev, "failed to find dwc3 child\n");
3316 ret = -ENODEV;
Jack Phambbe27962017-03-23 18:42:26 -07003317 goto uninit_iommu;
Mayank Rana511f3b22016-08-02 12:00:11 -07003318 }
3319
3320 ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
3321 if (ret) {
3322 dev_err(&pdev->dev,
3323			"failed to create dwc3 core\n");
3324 of_node_put(dwc3_node);
Jack Phambbe27962017-03-23 18:42:26 -07003325 goto uninit_iommu;
Mayank Rana511f3b22016-08-02 12:00:11 -07003326 }
3327
3328 mdwc->dwc3 = of_find_device_by_node(dwc3_node);
3329 of_node_put(dwc3_node);
3330 if (!mdwc->dwc3) {
3331		dev_err(&pdev->dev, "failed to get dwc3 platform device\n");
		ret = -ENODEV;
3332		goto put_dwc3;
3333 }
3334
3335 mdwc->hs_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
3336 "usb-phy", 0);
3337 if (IS_ERR(mdwc->hs_phy)) {
3338 dev_err(&pdev->dev, "unable to get hsphy device\n");
3339 ret = PTR_ERR(mdwc->hs_phy);
3340 goto put_dwc3;
3341 }
3342 mdwc->ss_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
3343 "usb-phy", 1);
3344 if (IS_ERR(mdwc->ss_phy)) {
3345 dev_err(&pdev->dev, "unable to get ssphy device\n");
3346 ret = PTR_ERR(mdwc->ss_phy);
3347 goto put_dwc3;
3348 }
3349
3350 mdwc->bus_scale_table = msm_bus_cl_get_pdata(pdev);
3351 if (mdwc->bus_scale_table) {
3352 mdwc->bus_perf_client =
3353 msm_bus_scale_register_client(mdwc->bus_scale_table);
3354 }
3355
3356 dwc = platform_get_drvdata(mdwc->dwc3);
3357 if (!dwc) {
3358		dev_err(&pdev->dev, "Failed to get dwc3 device\n");
		ret = -ENODEV;
3359		goto put_dwc3;
3360 }
3361
3362 mdwc->irq_to_affin = platform_get_irq(mdwc->dwc3, 0);
3363 mdwc->dwc3_cpu_notifier.notifier_call = dwc3_cpu_notifier_cb;
3364
3365 if (cpu_to_affin)
3366 register_cpu_notifier(&mdwc->dwc3_cpu_notifier);
3367
Mayank Ranaf4918d32016-12-15 13:35:55 -08003368 ret = of_property_read_u32(node, "qcom,num-gsi-evt-buffs",
3369 &mdwc->num_gsi_event_buffers);
3370
Jack Pham9faa51df2017-04-03 18:13:40 -07003371 /* IOMMU will be reattached upon each resume/connect */
3372 if (mdwc->iommu_map)
3373 arm_iommu_detach_device(mdwc->dev);
3374
Mayank Rana511f3b22016-08-02 12:00:11 -07003375 /*
3376 * Clocks and regulators will not be turned on until the first time
3377 * runtime PM resume is called. This is to allow for booting up with
3378 * charger already connected so as not to disturb PHY line states.
3379 */
3380 mdwc->lpm_flags = MDWC3_POWER_COLLAPSE | MDWC3_SS_PHY_SUSPEND;
3381 atomic_set(&dwc->in_lpm, 1);
Mayank Rana511f3b22016-08-02 12:00:11 -07003382 pm_runtime_set_autosuspend_delay(mdwc->dev, 1000);
3383 pm_runtime_use_autosuspend(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003384 device_init_wakeup(mdwc->dev, 1);
3385
3386 if (of_property_read_bool(node, "qcom,disable-dev-mode-pm"))
3387 pm_runtime_get_noresume(mdwc->dev);
3388
3389 ret = dwc3_msm_extcon_register(mdwc);
3390 if (ret)
3391 goto put_dwc3;
3392
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303393 ret = of_property_read_u32(node, "qcom,pm-qos-latency",
3394 &mdwc->pm_qos_latency);
3395 if (ret) {
3396 dev_dbg(&pdev->dev, "setting pm-qos-latency to zero.\n");
3397 mdwc->pm_qos_latency = 0;
3398 }
3399
Hemant Kumar8220a982017-01-19 18:11:34 -08003400 mdwc->usb_psy = power_supply_get_by_name("usb");
3401 if (!mdwc->usb_psy) {
3402 dev_warn(mdwc->dev, "Could not get usb power_supply\n");
3403 pval.intval = -EINVAL;
3404 } else {
3405 power_supply_get_property(mdwc->usb_psy,
3406 POWER_SUPPLY_PROP_PRESENT, &pval);
3407 }
3408
Mayank Rana511f3b22016-08-02 12:00:11 -07003409 /* Update initial VBUS/ID state from extcon */
Jack Pham4e9dff72017-04-04 18:05:53 -07003410 if (mdwc->extcon_vbus && extcon_get_state(mdwc->extcon_vbus,
Mayank Rana511f3b22016-08-02 12:00:11 -07003411 EXTCON_USB))
3412 dwc3_msm_vbus_notifier(&mdwc->vbus_nb, true, mdwc->extcon_vbus);
Jack Pham4e9dff72017-04-04 18:05:53 -07003413 else if (mdwc->extcon_id && extcon_get_state(mdwc->extcon_id,
Mayank Rana511f3b22016-08-02 12:00:11 -07003414 EXTCON_USB_HOST))
3415 dwc3_msm_id_notifier(&mdwc->id_nb, true, mdwc->extcon_id);
Hemant Kumar8220a982017-01-19 18:11:34 -08003416 else if (!pval.intval) {
3417 /* USB cable is not connected */
3418 schedule_delayed_work(&mdwc->sm_work, 0);
3419 } else {
3420 if (pval.intval > 0)
3421 dev_info(mdwc->dev, "charger detection in progress\n");
3422 }
Mayank Rana511f3b22016-08-02 12:00:11 -07003423
3424 device_create_file(&pdev->dev, &dev_attr_mode);
Vamsi Krishna Samavedam17f26db2017-01-31 17:21:23 -08003425 device_create_file(&pdev->dev, &dev_attr_speed);
Mayank Rana511f3b22016-08-02 12:00:11 -07003426
Mayank Rana511f3b22016-08-02 12:00:11 -07003427 host_mode = usb_get_dr_mode(&mdwc->dwc3->dev) == USB_DR_MODE_HOST;
3428 if (!dwc->is_drd && host_mode) {
3429 dev_dbg(&pdev->dev, "DWC3 in host only mode\n");
3430 mdwc->id_state = DWC3_ID_GROUND;
3431 dwc3_ext_event_notify(mdwc);
3432 }
3433
3434 return 0;
3435
3436put_dwc3:
3437 platform_device_put(mdwc->dwc3);
3438 if (mdwc->bus_perf_client)
3439 msm_bus_scale_unregister_client(mdwc->bus_perf_client);
Jack Phambbe27962017-03-23 18:42:26 -07003440uninit_iommu:
Jack Pham9faa51df2017-04-03 18:13:40 -07003441 if (mdwc->iommu_map) {
3442 arm_iommu_detach_device(mdwc->dev);
Jack Phambbe27962017-03-23 18:42:26 -07003443 arm_iommu_release_mapping(mdwc->iommu_map);
Jack Pham9faa51df2017-04-03 18:13:40 -07003444 }
Mayank Rana511f3b22016-08-02 12:00:11 -07003445err:
3446 return ret;
3447}
3448
3449static int dwc3_msm_remove_children(struct device *dev, void *data)
3450{
3451 device_unregister(dev);
3452 return 0;
3453}
3454
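/*
 * dwc3_msm_remove() - tear down the glue driver: bring the hardware out of
 * LPM (enabling clocks by hand if runtime resume fails), unregister child
 * devices and notifiers, disable wakeup IRQs, drop clock/GDSC votes and
 * release the IOMMU mapping.
 */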
3455static int dwc3_msm_remove(struct platform_device *pdev)
3456{
3457 struct dwc3_msm *mdwc = platform_get_drvdata(pdev);
Mayank Rana08e41922017-03-02 15:25:48 -08003458 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07003459 int ret_pm;
3460
3461	device_remove_file(&pdev->dev, &dev_attr_mode);
	device_remove_file(&pdev->dev, &dev_attr_speed);
3462
3463 if (cpu_to_affin)
3464 unregister_cpu_notifier(&mdwc->dwc3_cpu_notifier);
3465
3466 /*
3467 * In case of system suspend, pm_runtime_get_sync fails.
3468 * Hence turn ON the clocks manually.
3469 */
3470 ret_pm = pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003471 dbg_event(0xFF, "Remov gsyn", ret_pm);
Mayank Rana511f3b22016-08-02 12:00:11 -07003472 if (ret_pm < 0) {
3473 dev_err(mdwc->dev,
3474 "pm_runtime_get_sync failed with %d\n", ret_pm);
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05303475 if (mdwc->noc_aggr_clk)
3476 clk_prepare_enable(mdwc->noc_aggr_clk);
Mayank Rana511f3b22016-08-02 12:00:11 -07003477 clk_prepare_enable(mdwc->utmi_clk);
3478 clk_prepare_enable(mdwc->core_clk);
3479 clk_prepare_enable(mdwc->iface_clk);
3480 clk_prepare_enable(mdwc->sleep_clk);
3481 if (mdwc->bus_aggr_clk)
3482 clk_prepare_enable(mdwc->bus_aggr_clk);
3483 clk_prepare_enable(mdwc->xo_clk);
3484 }
3485
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303486 cancel_delayed_work_sync(&mdwc->perf_vote_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07003487 cancel_delayed_work_sync(&mdwc->sm_work);
3488
3489 if (mdwc->hs_phy)
3490 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3491 platform_device_put(mdwc->dwc3);
3492 device_for_each_child(&pdev->dev, NULL, dwc3_msm_remove_children);
3493
Mayank Rana08e41922017-03-02 15:25:48 -08003494 dbg_event(0xFF, "Remov put", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003495 pm_runtime_disable(mdwc->dev);
3496 pm_runtime_barrier(mdwc->dev);
3497 pm_runtime_put_sync(mdwc->dev);
3498 pm_runtime_set_suspended(mdwc->dev);
3499 device_wakeup_disable(mdwc->dev);
3500
3501 if (mdwc->bus_perf_client)
3502 msm_bus_scale_unregister_client(mdwc->bus_perf_client);
3503
3504 if (!IS_ERR_OR_NULL(mdwc->vbus_reg))
3505 regulator_disable(mdwc->vbus_reg);
3506
Mayank Ranad339abe2017-05-31 09:19:49 -07003507 if (mdwc->wakeup_irq[HS_PHY_IRQ].irq)
3508 disable_irq(mdwc->wakeup_irq[HS_PHY_IRQ].irq);
3509 if (mdwc->wakeup_irq[DP_HS_PHY_IRQ].irq)
3510 disable_irq(mdwc->wakeup_irq[DP_HS_PHY_IRQ].irq);
3511 if (mdwc->wakeup_irq[DM_HS_PHY_IRQ].irq)
3512 disable_irq(mdwc->wakeup_irq[DM_HS_PHY_IRQ].irq);
3513 if (mdwc->wakeup_irq[SS_PHY_IRQ].irq)
3514 disable_irq(mdwc->wakeup_irq[SS_PHY_IRQ].irq);
3515 disable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
Mayank Rana511f3b22016-08-02 12:00:11 -07003516
3517 clk_disable_unprepare(mdwc->utmi_clk);
3518 clk_set_rate(mdwc->core_clk, 19200000);
3519 clk_disable_unprepare(mdwc->core_clk);
3520 clk_disable_unprepare(mdwc->iface_clk);
3521 clk_disable_unprepare(mdwc->sleep_clk);
3522 clk_disable_unprepare(mdwc->xo_clk);
3523 clk_put(mdwc->xo_clk);
3524
3525 dwc3_msm_config_gdsc(mdwc, 0);
3526
Jack Phambbe27962017-03-23 18:42:26 -07003527 if (mdwc->iommu_map) {
3528 if (!atomic_read(&dwc->in_lpm))
3529 arm_iommu_detach_device(mdwc->dev);
3530 arm_iommu_release_mapping(mdwc->iommu_map);
3531 }
3532
Mayank Rana511f3b22016-08-02 12:00:11 -07003533 return 0;
3534}
3535
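/*
 * Host-mode USB device add/remove notifier: for devices attached directly to
 * the root hub it scales core_clk down to the HS rate when no SuperSpeed
 * root-hub port is active, restores the default rate on removal, and reports
 * the configured bMaxPower to the "usb" power supply so the VBUS boost can be
 * sized accordingly.
 */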
Jack Pham4d4e9342016-12-07 19:25:02 -08003536static int dwc3_msm_host_notifier(struct notifier_block *nb,
3537 unsigned long event, void *ptr)
3538{
3539 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, host_nb);
3540 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3541 struct usb_device *udev = ptr;
3542 union power_supply_propval pval;
3543 unsigned int max_power;
3544
3545 if (event != USB_DEVICE_ADD && event != USB_DEVICE_REMOVE)
3546 return NOTIFY_DONE;
3547
3548 if (!mdwc->usb_psy) {
3549 mdwc->usb_psy = power_supply_get_by_name("usb");
3550 if (!mdwc->usb_psy)
3551 return NOTIFY_DONE;
3552 }
3553
3554 /*
3555 * For direct-attach devices, new udev is direct child of root hub
3556 * i.e. dwc -> xhci -> root_hub -> udev
3557 * root_hub's udev->parent==NULL, so traverse struct device hierarchy
3558 */
3559 if (udev->parent && !udev->parent->parent &&
3560 udev->dev.parent->parent == &dwc->xhci->dev) {
3561 if (event == USB_DEVICE_ADD && udev->actconfig) {
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08003562 if (!dwc3_msm_is_ss_rhport_connected(mdwc)) {
3563 /*
3564 * Core clock rate can be reduced only if root
3565 * hub SS port is not enabled/connected.
3566 */
3567 clk_set_rate(mdwc->core_clk,
3568 mdwc->core_clk_rate_hs);
3569 dev_dbg(mdwc->dev,
3570 "set hs core clk rate %ld\n",
3571 mdwc->core_clk_rate_hs);
3572 mdwc->max_rh_port_speed = USB_SPEED_HIGH;
3573 } else {
3574 mdwc->max_rh_port_speed = USB_SPEED_SUPER;
3575 }
3576
Jack Pham4d4e9342016-12-07 19:25:02 -08003577 if (udev->speed >= USB_SPEED_SUPER)
3578 max_power = udev->actconfig->desc.bMaxPower * 8;
3579 else
3580 max_power = udev->actconfig->desc.bMaxPower * 2;
3581 dev_dbg(mdwc->dev, "%s configured bMaxPower:%d (mA)\n",
3582 dev_name(&udev->dev), max_power);
3583
3584 /* inform PMIC of max power so it can optimize boost */
3585 pval.intval = max_power * 1000;
3586 power_supply_set_property(mdwc->usb_psy,
3587 POWER_SUPPLY_PROP_BOOST_CURRENT, &pval);
3588 } else {
3589 pval.intval = 0;
3590 power_supply_set_property(mdwc->usb_psy,
3591 POWER_SUPPLY_PROP_BOOST_CURRENT, &pval);
Hemant Kumar6f504dc2017-02-07 14:13:58 -08003592
3593 /* set rate back to default core clk rate */
3594 clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
3595 dev_dbg(mdwc->dev, "set core clk rate %ld\n",
3596 mdwc->core_clk_rate);
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08003597 mdwc->max_rh_port_speed = USB_SPEED_UNKNOWN;
Jack Pham4d4e9342016-12-07 19:25:02 -08003598 }
3599 }
3600
3601 return NOTIFY_DONE;
3602}
3603
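/*
 * Apply or drop the CPU DMA latency vote: in perf mode request the
 * DT-provided qcom,pm-qos-latency value, otherwise fall back to
 * PM_QOS_DEFAULT_VALUE. A latency of zero disables the voting entirely.
 */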
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303604static void msm_dwc3_perf_vote_update(struct dwc3_msm *mdwc, bool perf_mode)
3605{
3606 static bool curr_perf_mode;
3607 int latency = mdwc->pm_qos_latency;
3608
3609 if ((curr_perf_mode == perf_mode) || !latency)
3610 return;
3611
3612 if (perf_mode)
3613 pm_qos_update_request(&mdwc->pm_qos_req_dma, latency);
3614 else
3615 pm_qos_update_request(&mdwc->pm_qos_req_dma,
3616 PM_QOS_DEFAULT_VALUE);
3617
3618 curr_perf_mode = perf_mode;
3619 pr_debug("%s: latency updated to: %d\n", __func__,
3620 perf_mode ? latency : PM_QOS_DEFAULT_VALUE);
3621}
3622
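/*
 * Periodic worker (every PM_QOS_SAMPLE_SEC seconds) that samples the dwc3
 * interrupt count and votes for perf mode whenever at least PM_QOS_THRESHOLD
 * interrupts arrived during the last sample window.
 */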
3623static void msm_dwc3_perf_vote_work(struct work_struct *w)
3624{
3625 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
3626 perf_vote_work.work);
3627 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3628 static unsigned long last_irq_cnt;
3629 bool in_perf_mode = false;
3630
3631 if (dwc->irq_cnt - last_irq_cnt >= PM_QOS_THRESHOLD)
3632 in_perf_mode = true;
3633
3634 pr_debug("%s: in_perf_mode:%u, interrupts in last sample:%lu\n",
3635 __func__, in_perf_mode, (dwc->irq_cnt - last_irq_cnt));
3636
3637 last_irq_cnt = dwc->irq_cnt;
3638 msm_dwc3_perf_vote_update(mdwc, in_perf_mode);
3639 schedule_delayed_work(&mdwc->perf_vote_work,
3640 msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
3641}
3642
Mayank Rana511f3b22016-08-02 12:00:11 -07003643#define VBUS_REG_CHECK_DELAY (msecs_to_jiffies(1000))
3644
3645/**
3646 * dwc3_otg_start_host - helper function for starting/stopping the host
3647 * controller driver.
3648 *
3649 * @mdwc: Pointer to the dwc3_msm structure.
3650 * @on: start / stop the host controller driver.
3651 *
3652 * Returns 0 on success, otherwise a negative errno.
3653 */
3654static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
3655{
3656 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3657 int ret = 0;
3658
3659 if (!dwc->xhci)
3660 return -EINVAL;
3661
3662 /*
3663 * The vbus_reg pointer could have multiple values
3664 * NULL: regulator_get() hasn't been called, or was previously deferred
3665 * IS_ERR: regulator could not be obtained, so skip using it
3666 * Valid pointer otherwise
3667 */
3668 if (!mdwc->vbus_reg) {
3669 mdwc->vbus_reg = devm_regulator_get_optional(mdwc->dev,
3670 "vbus_dwc3");
3671 if (IS_ERR(mdwc->vbus_reg) &&
3672 PTR_ERR(mdwc->vbus_reg) == -EPROBE_DEFER) {
3673 /* regulators may not be ready, so retry again later */
3674 mdwc->vbus_reg = NULL;
3675 return -EPROBE_DEFER;
3676 }
3677 }
3678
3679 if (on) {
3680 dev_dbg(mdwc->dev, "%s: turn on host\n", __func__);
3681
Mayank Rana511f3b22016-08-02 12:00:11 -07003682 mdwc->hs_phy->flags |= PHY_HOST_MODE;
Mayank Rana0d5efd72017-06-08 10:06:00 -07003683 if (dwc->maximum_speed == USB_SPEED_SUPER) {
Hemant Kumarde1df692016-04-26 19:36:48 -07003684 mdwc->ss_phy->flags |= PHY_HOST_MODE;
Mayank Rana0d5efd72017-06-08 10:06:00 -07003685 usb_phy_notify_connect(mdwc->ss_phy,
3686 USB_SPEED_SUPER);
3687 }
Hemant Kumarde1df692016-04-26 19:36:48 -07003688
Mayank Rana0d5efd72017-06-08 10:06:00 -07003689 usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
Hemant Kumarde1df692016-04-26 19:36:48 -07003690 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003691 dbg_event(0xFF, "StrtHost gync",
3692 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003693 if (!IS_ERR(mdwc->vbus_reg))
3694 ret = regulator_enable(mdwc->vbus_reg);
3695 if (ret) {
3696 dev_err(mdwc->dev, "unable to enable vbus_reg\n");
3697 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3698 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
3699 pm_runtime_put_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003700 dbg_event(0xFF, "vregerr psync",
3701 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003702 return ret;
3703 }
3704
3705 dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
3706
Jack Pham4d4e9342016-12-07 19:25:02 -08003707 mdwc->host_nb.notifier_call = dwc3_msm_host_notifier;
3708 usb_register_notify(&mdwc->host_nb);
3709
Manu Gautam976fdfc2016-08-18 09:27:35 +05303710 mdwc->usbdev_nb.notifier_call = msm_dwc3_usbdev_notify;
3711 usb_register_atomic_notify(&mdwc->usbdev_nb);
Mayank Rana511f3b22016-08-02 12:00:11 -07003712 /*
3713 * FIXME If micro A cable is disconnected during system suspend,
3714 * xhci platform device will be removed before runtime pm is
3715 * enabled for xhci device. Due to this, disable_depth becomes
3716	 * greater than one and runtime PM is not enabled for the next micro A
3717 * connect. Fix this by calling pm_runtime_init for xhci device.
3718 */
3719 pm_runtime_init(&dwc->xhci->dev);
3720 ret = platform_device_add(dwc->xhci);
3721 if (ret) {
3722 dev_err(mdwc->dev,
3723 "%s: failed to add XHCI pdev ret=%d\n",
3724 __func__, ret);
3725 if (!IS_ERR(mdwc->vbus_reg))
3726 regulator_disable(mdwc->vbus_reg);
3727 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3728 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
3729 pm_runtime_put_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003730 dbg_event(0xFF, "pdeverr psync",
3731 atomic_read(&mdwc->dev->power.usage_count));
Jack Pham4d4e9342016-12-07 19:25:02 -08003732 usb_unregister_notify(&mdwc->host_nb);
Mayank Rana511f3b22016-08-02 12:00:11 -07003733 return ret;
3734 }
3735
3736 /*
3737	 * In some cases it is observed that the USB PHY does not enter
3738	 * suspend with the host mode suspend functionality. Hence disable
3739 * XHCI's runtime PM here if disable_host_mode_pm is set.
3740 */
3741 if (mdwc->disable_host_mode_pm)
3742 pm_runtime_disable(&dwc->xhci->dev);
3743
3744 mdwc->in_host_mode = true;
3745 dwc3_usb3_phy_suspend(dwc, true);
3746
3747 /* xHCI should have incremented child count as necessary */
Mayank Rana08e41922017-03-02 15:25:48 -08003748 dbg_event(0xFF, "StrtHost psync",
3749 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003750 pm_runtime_mark_last_busy(mdwc->dev);
3751 pm_runtime_put_sync_autosuspend(mdwc->dev);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303752#ifdef CONFIG_SMP
3753 mdwc->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
3754 mdwc->pm_qos_req_dma.irq = dwc->irq;
3755#endif
3756 pm_qos_add_request(&mdwc->pm_qos_req_dma,
3757 PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
3758 /* start in perf mode for better performance initially */
3759 msm_dwc3_perf_vote_update(mdwc, true);
3760 schedule_delayed_work(&mdwc->perf_vote_work,
3761 msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
Mayank Rana511f3b22016-08-02 12:00:11 -07003762 } else {
3763 dev_dbg(mdwc->dev, "%s: turn off host\n", __func__);
3764
Manu Gautam976fdfc2016-08-18 09:27:35 +05303765 usb_unregister_atomic_notify(&mdwc->usbdev_nb);
Mayank Rana511f3b22016-08-02 12:00:11 -07003766 if (!IS_ERR(mdwc->vbus_reg))
3767 ret = regulator_disable(mdwc->vbus_reg);
3768 if (ret) {
3769 dev_err(mdwc->dev, "unable to disable vbus_reg\n");
3770 return ret;
3771 }
3772
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303773 cancel_delayed_work_sync(&mdwc->perf_vote_work);
3774 msm_dwc3_perf_vote_update(mdwc, false);
3775 pm_qos_remove_request(&mdwc->pm_qos_req_dma);
3776
Mayank Rana511f3b22016-08-02 12:00:11 -07003777 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003778 dbg_event(0xFF, "StopHost gsync",
3779 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003780 usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
Mayank Rana0d5efd72017-06-08 10:06:00 -07003781 if (mdwc->ss_phy->flags & PHY_HOST_MODE) {
3782 usb_phy_notify_disconnect(mdwc->ss_phy,
3783 USB_SPEED_SUPER);
3784 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
3785 }
3786
Mayank Rana511f3b22016-08-02 12:00:11 -07003787 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
Mayank Rana511f3b22016-08-02 12:00:11 -07003788 platform_device_del(dwc->xhci);
Jack Pham4d4e9342016-12-07 19:25:02 -08003789 usb_unregister_notify(&mdwc->host_nb);
Mayank Rana511f3b22016-08-02 12:00:11 -07003790
Mayank Rana511f3b22016-08-02 12:00:11 -07003791 dwc3_usb3_phy_suspend(dwc, false);
Mayank Rana511f3b22016-08-02 12:00:11 -07003792 mdwc->in_host_mode = false;
3793
Mayank Rana511f3b22016-08-02 12:00:11 -07003794 pm_runtime_mark_last_busy(mdwc->dev);
3795 pm_runtime_put_sync_autosuspend(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003796 dbg_event(0xFF, "StopHost psync",
3797 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003798 }
3799
3800 return 0;
3801}
3802
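/*
 * Override the VBUS-valid indication seen by the controller: assert or clear
 * UTMI_OTG_VBUS_VALID (with SW_SESSVLD_SEL) in the HS PHY control register
 * and, when SuperSpeed is supported, LANE0_PWR_PRESENT in the SS PHY control
 * register.
 */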
3803static void dwc3_override_vbus_status(struct dwc3_msm *mdwc, bool vbus_present)
3804{
3805 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3806
3807 /* Update OTG VBUS Valid from HSPHY to controller */
3808 dwc3_msm_write_readback(mdwc->base, HS_PHY_CTRL_REG,
3809 vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL :
3810 UTMI_OTG_VBUS_VALID,
3811 vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL : 0);
3812
3813 /* Update only if Super Speed is supported */
3814 if (dwc->maximum_speed == USB_SPEED_SUPER) {
3815 /* Update VBUS Valid from SSPHY to controller */
3816 dwc3_msm_write_readback(mdwc->base, SS_PHY_CTRL_REG,
3817 LANE0_PWR_PRESENT,
3818 vbus_present ? LANE0_PWR_PRESENT : 0);
3819 }
3820}
3821
3822/**
3823 * dwc3_otg_start_peripheral - bind/unbind the peripheral controller.
3824 *
3825 * @mdwc: Pointer to the dwc3_msm structure.
3826 * @on: Turn ON/OFF the gadget.
3827 *
3828 * Returns 0 on success otherwise negative errno.
3829 * Returns 0 on success, otherwise a negative errno.
3830static int dwc3_otg_start_peripheral(struct dwc3_msm *mdwc, int on)
3831{
3832 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3833
3834 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003835 dbg_event(0xFF, "StrtGdgt gsync",
3836 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003837
3838 if (on) {
3839 dev_dbg(mdwc->dev, "%s: turn on gadget %s\n",
3840 __func__, dwc->gadget.name);
3841
3842 dwc3_override_vbus_status(mdwc, true);
3843 usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
3844 usb_phy_notify_connect(mdwc->ss_phy, USB_SPEED_SUPER);
3845
3846 /*
3847 * Core reset is not required during start peripheral. Only
3848 * DBM reset is required, hence perform only DBM reset here.
3849 */
3850 dwc3_msm_block_reset(mdwc, false);
3851
3852 dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
3853 usb_gadget_vbus_connect(&dwc->gadget);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303854#ifdef CONFIG_SMP
3855 mdwc->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
3856 mdwc->pm_qos_req_dma.irq = dwc->irq;
3857#endif
3858 pm_qos_add_request(&mdwc->pm_qos_req_dma,
3859 PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
3860 /* start in perf mode for better performance initially */
3861 msm_dwc3_perf_vote_update(mdwc, true);
3862 schedule_delayed_work(&mdwc->perf_vote_work,
3863 msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
Mayank Rana511f3b22016-08-02 12:00:11 -07003864 } else {
3865 dev_dbg(mdwc->dev, "%s: turn off gadget %s\n",
3866 __func__, dwc->gadget.name);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303867 cancel_delayed_work_sync(&mdwc->perf_vote_work);
3868 msm_dwc3_perf_vote_update(mdwc, false);
3869 pm_qos_remove_request(&mdwc->pm_qos_req_dma);
3870
Mayank Rana511f3b22016-08-02 12:00:11 -07003871 usb_gadget_vbus_disconnect(&dwc->gadget);
3872 usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
3873 usb_phy_notify_disconnect(mdwc->ss_phy, USB_SPEED_SUPER);
3874 dwc3_override_vbus_status(mdwc, false);
3875 dwc3_usb3_phy_suspend(dwc, false);
3876 }
3877
3878 pm_runtime_put_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003879 dbg_event(0xFF, "StopGdgt psync",
3880 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003881
3882 return 0;
3883}
3884
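/*
 * Return the charger type (POWER_SUPPLY_PROP_REAL_TYPE) reported by the
 * "usb" power supply, or a negative errno if charging is disabled or the
 * power supply is not (yet) available.
 */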
Hemant Kumar006fae42017-07-12 18:11:25 -07003885static int get_psy_type(struct dwc3_msm *mdwc)
Mayank Rana511f3b22016-08-02 12:00:11 -07003886{
Jack Pham8caff352016-08-19 16:33:55 -07003887 union power_supply_propval pval = {0};
Mayank Rana511f3b22016-08-02 12:00:11 -07003888
3889 if (mdwc->charging_disabled)
Hemant Kumar006fae42017-07-12 18:11:25 -07003890 return -EINVAL;
Mayank Rana511f3b22016-08-02 12:00:11 -07003891
3892 if (!mdwc->usb_psy) {
3893 mdwc->usb_psy = power_supply_get_by_name("usb");
3894 if (!mdwc->usb_psy) {
Hemant Kumar006fae42017-07-12 18:11:25 -07003895 dev_err(mdwc->dev, "Could not get usb psy\n");
Mayank Rana511f3b22016-08-02 12:00:11 -07003896 return -ENODEV;
3897 }
3898 }
3899
Hemant Kumar006fae42017-07-12 18:11:25 -07003900 power_supply_get_property(mdwc->usb_psy, POWER_SUPPLY_PROP_REAL_TYPE,
3901 &pval);
3902
3903 return pval.intval;
3904}
3905
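/*
 * Publish the current the gadget may draw: for a standard SDP the limit is
 * handed to the "usb" power supply as POWER_SUPPLY_PROP_CURRENT_MAX (in uA);
 * for a floating charger -ETIMEDOUT is reported instead, and all other
 * charger types are left untouched.
 */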
3906static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA)
3907{
3908 union power_supply_propval pval = {0};
3909 int ret, psy_type;
3910
3911 if (mdwc->max_power == mA)
3912 return 0;
3913
3914 psy_type = get_psy_type(mdwc);
Hemant Kumard6bae052017-07-27 15:11:25 -07003915 if (psy_type == POWER_SUPPLY_TYPE_USB) {
3916 dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA);
3917 /* Set max current limit in uA */
3918 pval.intval = 1000 * mA;
3919 } else if (psy_type == POWER_SUPPLY_TYPE_USB_FLOAT) {
3920 pval.intval = -ETIMEDOUT;
3921 } else {
Jack Pham8caff352016-08-19 16:33:55 -07003922 return 0;
Hemant Kumard6bae052017-07-27 15:11:25 -07003923 }
Jack Pham8caff352016-08-19 16:33:55 -07003924
Jack Phamd72bafe2016-08-09 11:07:22 -07003925 ret = power_supply_set_property(mdwc->usb_psy,
3926 POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
3927 if (ret) {
3928 dev_dbg(mdwc->dev, "power supply error when setting property\n");
3929 return ret;
3930 }
Mayank Rana511f3b22016-08-02 12:00:11 -07003931
3932 mdwc->max_power = mA;
3933 return 0;
Mayank Rana511f3b22016-08-02 12:00:11 -07003934}
3935
3936
3937/**
3938 * dwc3_otg_sm_work - workqueue function.
3939 *
3940 * @w: Pointer to the dwc3 otg workqueue
3941 *
3942 * NOTE: After any change in otg_state, we must reschedule the state machine.
3943 */
3944static void dwc3_otg_sm_work(struct work_struct *w)
3945{
3946 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, sm_work.work);
3947 struct dwc3 *dwc = NULL;
3948 bool work = 0;
3949 int ret = 0;
3950 unsigned long delay = 0;
3951 const char *state;
3952
3953 if (mdwc->dwc3)
3954 dwc = platform_get_drvdata(mdwc->dwc3);
3955
3956 if (!dwc) {
3957 dev_err(mdwc->dev, "dwc is NULL.\n");
3958 return;
3959 }
3960
3961 state = usb_otg_state_string(mdwc->otg_state);
3962 dev_dbg(mdwc->dev, "%s state\n", state);
Mayank Rana08e41922017-03-02 15:25:48 -08003963 dbg_event(0xFF, state, 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003964
3965 /* Check OTG state */
3966 switch (mdwc->otg_state) {
3967 case OTG_STATE_UNDEFINED:
Hemant Kumar8220a982017-01-19 18:11:34 -08003968 /* put controller and phy in suspend if no cable connected */
Mayank Rana511f3b22016-08-02 12:00:11 -07003969 if (test_bit(ID, &mdwc->inputs) &&
Hemant Kumar8220a982017-01-19 18:11:34 -08003970 !test_bit(B_SESS_VLD, &mdwc->inputs)) {
3971 dbg_event(0xFF, "undef_id_!bsv", 0);
3972 pm_runtime_set_active(mdwc->dev);
3973 pm_runtime_enable(mdwc->dev);
3974 pm_runtime_get_noresume(mdwc->dev);
3975 dwc3_msm_resume(mdwc);
3976 pm_runtime_put_sync(mdwc->dev);
3977 dbg_event(0xFF, "Undef NoUSB",
3978 atomic_read(&mdwc->dev->power.usage_count));
3979 mdwc->otg_state = OTG_STATE_B_IDLE;
Mayank Rana511f3b22016-08-02 12:00:11 -07003980 break;
Hemant Kumar8220a982017-01-19 18:11:34 -08003981 }
Mayank Rana511f3b22016-08-02 12:00:11 -07003982
Mayank Rana08e41922017-03-02 15:25:48 -08003983 dbg_event(0xFF, "Exit UNDEF", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003984 mdwc->otg_state = OTG_STATE_B_IDLE;
Hemant Kumar8220a982017-01-19 18:11:34 -08003985 pm_runtime_set_suspended(mdwc->dev);
3986 pm_runtime_enable(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003987 /* fall-through */
3988 case OTG_STATE_B_IDLE:
3989 if (!test_bit(ID, &mdwc->inputs)) {
3990 dev_dbg(mdwc->dev, "!id\n");
3991 mdwc->otg_state = OTG_STATE_A_IDLE;
3992 work = 1;
3993 } else if (test_bit(B_SESS_VLD, &mdwc->inputs)) {
3994 dev_dbg(mdwc->dev, "b_sess_vld\n");
Hemant Kumar006fae42017-07-12 18:11:25 -07003995 if (get_psy_type(mdwc) == POWER_SUPPLY_TYPE_USB_FLOAT)
3996 queue_delayed_work(mdwc->dwc3_wq,
3997 &mdwc->sdp_check,
3998 msecs_to_jiffies(SDP_CONNETION_CHECK_TIME));
Mayank Rana511f3b22016-08-02 12:00:11 -07003999 /*
4000 * Increment pm usage count upon cable connect. Count
4001 * is decremented in OTG_STATE_B_PERIPHERAL state on
4002 * cable disconnect or in bus suspend.
4003 */
4004 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004005 dbg_event(0xFF, "BIDLE gsync",
4006 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07004007 dwc3_otg_start_peripheral(mdwc, 1);
4008 mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
4009 work = 1;
4010 } else {
4011 dwc3_msm_gadget_vbus_draw(mdwc, 0);
4012 dev_dbg(mdwc->dev, "Cable disconnected\n");
4013 }
4014 break;
4015
4016 case OTG_STATE_B_PERIPHERAL:
4017 if (!test_bit(B_SESS_VLD, &mdwc->inputs) ||
4018 !test_bit(ID, &mdwc->inputs)) {
4019 dev_dbg(mdwc->dev, "!id || !bsv\n");
4020 mdwc->otg_state = OTG_STATE_B_IDLE;
Hemant Kumar006fae42017-07-12 18:11:25 -07004021 cancel_delayed_work_sync(&mdwc->sdp_check);
Mayank Rana511f3b22016-08-02 12:00:11 -07004022 dwc3_otg_start_peripheral(mdwc, 0);
4023 /*
4024 * Decrement pm usage count upon cable disconnect
4025 * which was incremented upon cable connect in
4026 * OTG_STATE_B_IDLE state
4027 */
4028 pm_runtime_put_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004029 dbg_event(0xFF, "!BSV psync",
4030 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07004031 work = 1;
4032 } else if (test_bit(B_SUSPEND, &mdwc->inputs) &&
4033 test_bit(B_SESS_VLD, &mdwc->inputs)) {
4034 dev_dbg(mdwc->dev, "BPER bsv && susp\n");
4035 mdwc->otg_state = OTG_STATE_B_SUSPEND;
4036 /*
4037 * Decrement pm usage count upon bus suspend.
4038 * Count was incremented either upon cable
4039 * connect in OTG_STATE_B_IDLE or host
4040 * initiated resume after bus suspend in
4041 * OTG_STATE_B_SUSPEND state
4042 */
4043 pm_runtime_mark_last_busy(mdwc->dev);
4044 pm_runtime_put_autosuspend(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004045 dbg_event(0xFF, "SUSP put",
4046 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07004047 }
4048 break;
4049
4050 case OTG_STATE_B_SUSPEND:
4051 if (!test_bit(B_SESS_VLD, &mdwc->inputs)) {
4052 dev_dbg(mdwc->dev, "BSUSP: !bsv\n");
4053 mdwc->otg_state = OTG_STATE_B_IDLE;
Hemant Kumar006fae42017-07-12 18:11:25 -07004054 cancel_delayed_work_sync(&mdwc->sdp_check);
Mayank Rana511f3b22016-08-02 12:00:11 -07004055 dwc3_otg_start_peripheral(mdwc, 0);
4056 } else if (!test_bit(B_SUSPEND, &mdwc->inputs)) {
4057 dev_dbg(mdwc->dev, "BSUSP !susp\n");
4058 mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
4059 /*
4060 * Increment pm usage count upon host
4061 * initiated resume. Count was decremented
4062 * upon bus suspend in
4063 * OTG_STATE_B_PERIPHERAL state.
4064 */
4065 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004066 dbg_event(0xFF, "!SUSP gsync",
4067 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07004068 }
4069 break;
4070
4071 case OTG_STATE_A_IDLE:
4072 /* Switch to A-Device*/
4073 if (test_bit(ID, &mdwc->inputs)) {
4074 dev_dbg(mdwc->dev, "id\n");
4075 mdwc->otg_state = OTG_STATE_B_IDLE;
4076 mdwc->vbus_retry_count = 0;
4077 work = 1;
4078 } else {
4079 mdwc->otg_state = OTG_STATE_A_HOST;
4080 ret = dwc3_otg_start_host(mdwc, 1);
4081 if ((ret == -EPROBE_DEFER) &&
4082 mdwc->vbus_retry_count < 3) {
4083 /*
4084				 * Getting the regulator failed because the regulator
4085				 * driver is not up yet; retry starting the host after 1 sec.
4086 */
4087 mdwc->otg_state = OTG_STATE_A_IDLE;
4088 dev_dbg(mdwc->dev, "Unable to get vbus regulator. Retrying...\n");
4089 delay = VBUS_REG_CHECK_DELAY;
4090 work = 1;
4091 mdwc->vbus_retry_count++;
4092 } else if (ret) {
4093 dev_err(mdwc->dev, "unable to start host\n");
4094 mdwc->otg_state = OTG_STATE_A_IDLE;
4095 goto ret;
4096 }
4097 }
4098 break;
4099
4100 case OTG_STATE_A_HOST:
Manu Gautam976fdfc2016-08-18 09:27:35 +05304101 if (test_bit(ID, &mdwc->inputs) || mdwc->hc_died) {
4102 dev_dbg(mdwc->dev, "id || hc_died\n");
Mayank Rana511f3b22016-08-02 12:00:11 -07004103 dwc3_otg_start_host(mdwc, 0);
4104 mdwc->otg_state = OTG_STATE_B_IDLE;
4105 mdwc->vbus_retry_count = 0;
Manu Gautam976fdfc2016-08-18 09:27:35 +05304106 mdwc->hc_died = false;
Mayank Rana511f3b22016-08-02 12:00:11 -07004107 work = 1;
4108 } else {
4109 dev_dbg(mdwc->dev, "still in a_host state. Resuming root hub.\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004110 dbg_event(0xFF, "XHCIResume", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004111 if (dwc)
4112 pm_runtime_resume(&dwc->xhci->dev);
4113 }
4114 break;
4115
4116 default:
4117 dev_err(mdwc->dev, "%s: invalid otg-state\n", __func__);
4118
4119 }
4120
4121 if (work)
4122 schedule_delayed_work(&mdwc->sm_work, delay);
4123
4124ret:
4125 return;
4126}
4127
4128#ifdef CONFIG_PM_SLEEP
4129static int dwc3_msm_pm_suspend(struct device *dev)
4130{
4131 int ret = 0;
4132 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
4133 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
4134
4135 dev_dbg(dev, "dwc3-msm PM suspend\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004136 dbg_event(0xFF, "PM Sus", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004137
4138 flush_workqueue(mdwc->dwc3_wq);
4139 if (!atomic_read(&dwc->in_lpm)) {
4140 dev_err(mdwc->dev, "Abort PM suspend!! (USB is outside LPM)\n");
4141 return -EBUSY;
4142 }
4143
4144 ret = dwc3_msm_suspend(mdwc);
4145 if (!ret)
4146 atomic_set(&mdwc->pm_suspended, 1);
4147
4148 return ret;
4149}
4150
4151static int dwc3_msm_pm_resume(struct device *dev)
4152{
4153 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004154 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07004155
4156 dev_dbg(dev, "dwc3-msm PM resume\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004157 dbg_event(0xFF, "PM Res", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004158
Mayank Rana511f3b22016-08-02 12:00:11 -07004159 /* flush to avoid race in read/write of pm_suspended */
4160 flush_workqueue(mdwc->dwc3_wq);
4161 atomic_set(&mdwc->pm_suspended, 0);
4162
4163 /* kick in otg state machine */
4164 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
4165
4166 return 0;
4167}
4168#endif
4169
4170#ifdef CONFIG_PM
4171static int dwc3_msm_runtime_idle(struct device *dev)
4172{
Mayank Rana08e41922017-03-02 15:25:48 -08004173 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
4174 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
4175
Mayank Rana511f3b22016-08-02 12:00:11 -07004176 dev_dbg(dev, "DWC3-msm runtime idle\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004177 dbg_event(0xFF, "RT Idle", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004178
4179 return 0;
4180}
4181
4182static int dwc3_msm_runtime_suspend(struct device *dev)
4183{
4184 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004185 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07004186
4187 dev_dbg(dev, "DWC3-msm runtime suspend\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004188 dbg_event(0xFF, "RT Sus", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004189
4190 return dwc3_msm_suspend(mdwc);
4191}
4192
4193static int dwc3_msm_runtime_resume(struct device *dev)
4194{
4195 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004196 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07004197
4198 dev_dbg(dev, "DWC3-msm runtime resume\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004199 dbg_event(0xFF, "RT Res", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004200
4201 return dwc3_msm_resume(mdwc);
4202}
4203#endif
4204
4205static const struct dev_pm_ops dwc3_msm_dev_pm_ops = {
4206 SET_SYSTEM_SLEEP_PM_OPS(dwc3_msm_pm_suspend, dwc3_msm_pm_resume)
4207 SET_RUNTIME_PM_OPS(dwc3_msm_runtime_suspend, dwc3_msm_runtime_resume,
4208 dwc3_msm_runtime_idle)
4209};
4210
4211static const struct of_device_id of_dwc3_match[] = {
4212 {
4213 .compatible = "qcom,dwc-usb3-msm",
4214 },
4215 { },
4216};
4217MODULE_DEVICE_TABLE(of, of_dwc3_match);
4218
4219static struct platform_driver dwc3_msm_driver = {
4220 .probe = dwc3_msm_probe,
4221 .remove = dwc3_msm_remove,
4222 .driver = {
4223 .name = "msm-dwc3",
4224 .pm = &dwc3_msm_dev_pm_ops,
4225		.of_match_table = of_dwc3_match,
4226 },
4227};
4228
4229MODULE_LICENSE("GPL v2");
4230MODULE_DESCRIPTION("DesignWare USB3 MSM Glue Layer");
4231
4232static int dwc3_msm_init(void)
4233{
4234 return platform_driver_register(&dwc3_msm_driver);
4235}
4236module_init(dwc3_msm_init);
4237
4238static void __exit dwc3_msm_exit(void)
4239{
4240 platform_driver_unregister(&dwc3_msm_driver);
4241}
4242module_exit(dwc3_msm_exit);