Hemant Kumar8e4c2f22017-01-24 18:13:07 -08001/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Mayank Rana511f3b22016-08-02 12:00:11 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/slab.h>
17#include <linux/cpu.h>
18#include <linux/platform_device.h>
19#include <linux/dma-mapping.h>
20#include <linux/dmapool.h>
21#include <linux/pm_runtime.h>
22#include <linux/ratelimit.h>
23#include <linux/interrupt.h>
Jack Phambbe27962017-03-23 18:42:26 -070024#include <asm/dma-iommu.h>
25#include <linux/iommu.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070026#include <linux/ioport.h>
27#include <linux/clk.h>
28#include <linux/io.h>
29#include <linux/module.h>
30#include <linux/types.h>
31#include <linux/delay.h>
32#include <linux/of.h>
33#include <linux/of_platform.h>
34#include <linux/of_gpio.h>
35#include <linux/list.h>
36#include <linux/uaccess.h>
37#include <linux/usb/ch9.h>
38#include <linux/usb/gadget.h>
39#include <linux/usb/of.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070040#include <linux/regulator/consumer.h>
41#include <linux/pm_wakeup.h>
42#include <linux/power_supply.h>
43#include <linux/cdev.h>
44#include <linux/completion.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070045#include <linux/msm-bus.h>
46#include <linux/irq.h>
47#include <linux/extcon.h>
Amit Nischal4d278212016-06-06 17:54:34 +053048#include <linux/reset.h>
Hemant Kumar633dc332016-08-10 13:41:05 -070049#include <linux/clk/qcom.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070050
51#include "power.h"
52#include "core.h"
53#include "gadget.h"
54#include "dbm.h"
55#include "debug.h"
56#include "xhci.h"
57
Hemant Kumar006fae42017-07-12 18:11:25 -070058#define SDP_CONNETION_CHECK_TIME 10000 /* in ms */
59
Mayank Rana511f3b22016-08-02 12:00:11 -070060/* timeout to wait for USB cable status notification (in ms) */
61#define SM_INIT_TIMEOUT 30000
62
63/* AHB2PHY register offsets */
64#define PERIPH_SS_AHB2PHY_TOP_CFG 0x10
65
66/* AHB2PHY read/write wait value */
67#define ONE_READ_WRITE_WAIT 0x11
68
69/* cpu to affine the usb interrupt to */
70static int cpu_to_affin;
71module_param(cpu_to_affin, int, S_IRUGO|S_IWUSR);
72MODULE_PARM_DESC(cpu_to_affin, "affin usb irq to this cpu");
73
Mayank Ranaf70d8212017-06-12 14:02:07 -070074/* override for USB speed */
75static int override_usb_speed;
76module_param(override_usb_speed, int, 0644);
77MODULE_PARM_DESC(override_usb_speed, "override for USB speed");
78
Mayank Rana511f3b22016-08-02 12:00:11 -070079/* XHCI registers */
80#define USB3_HCSPARAMS1 (0x4)
81#define USB3_PORTSC (0x420)
82
83/**
84 * USB QSCRATCH Hardware registers
85 *
86 */
87#define QSCRATCH_REG_OFFSET (0x000F8800)
88#define QSCRATCH_GENERAL_CFG (QSCRATCH_REG_OFFSET + 0x08)
89#define CGCTL_REG (QSCRATCH_REG_OFFSET + 0x28)
90#define PWR_EVNT_IRQ_STAT_REG (QSCRATCH_REG_OFFSET + 0x58)
91#define PWR_EVNT_IRQ_MASK_REG (QSCRATCH_REG_OFFSET + 0x5C)
92
93#define PWR_EVNT_POWERDOWN_IN_P3_MASK BIT(2)
94#define PWR_EVNT_POWERDOWN_OUT_P3_MASK BIT(3)
95#define PWR_EVNT_LPM_IN_L2_MASK BIT(4)
96#define PWR_EVNT_LPM_OUT_L2_MASK BIT(5)
97#define PWR_EVNT_LPM_OUT_L1_MASK BIT(13)
98
99/* QSCRATCH_GENERAL_CFG register bit offset */
100#define PIPE_UTMI_CLK_SEL BIT(0)
101#define PIPE3_PHYSTATUS_SW BIT(3)
102#define PIPE_UTMI_CLK_DIS BIT(8)
103
104#define HS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x10)
105#define UTMI_OTG_VBUS_VALID BIT(20)
106#define SW_SESSVLD_SEL BIT(28)
107
108#define SS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x30)
109#define LANE0_PWR_PRESENT BIT(24)
110
111/* GSI related registers */
112#define GSI_TRB_ADDR_BIT_53_MASK (1 << 21)
113#define GSI_TRB_ADDR_BIT_55_MASK (1 << 23)
114
115#define GSI_GENERAL_CFG_REG (QSCRATCH_REG_OFFSET + 0xFC)
116#define GSI_RESTART_DBL_PNTR_MASK BIT(20)
117#define GSI_CLK_EN_MASK BIT(12)
118#define BLOCK_GSI_WR_GO_MASK BIT(1)
119#define GSI_EN_MASK BIT(0)
120
121#define GSI_DBL_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x110) + (n*4))
122#define GSI_DBL_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x120) + (n*4))
123#define GSI_RING_BASE_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x130) + (n*4))
124#define GSI_RING_BASE_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x140) + (n*4))
125
126#define GSI_IF_STS (QSCRATCH_REG_OFFSET + 0x1A4)
127#define GSI_WR_CTRL_STATE_MASK BIT(15)
128
Mayank Ranaf4918d32016-12-15 13:35:55 -0800129#define DWC3_GEVNTCOUNT_EVNTINTRPTMASK (1 << 31)
130#define DWC3_GEVNTADRHI_EVNTADRHI_GSI_EN(n) (n << 22)
131#define DWC3_GEVNTADRHI_EVNTADRHI_GSI_IDX(n) (n << 16)
132#define DWC3_GEVENT_TYPE_GSI 0x3
133
Mayank Rana511f3b22016-08-02 12:00:11 -0700134struct dwc3_msm_req_complete {
135 struct list_head list_item;
136 struct usb_request *req;
137 void (*orig_complete)(struct usb_ep *ep,
138 struct usb_request *req);
139};
140
141enum dwc3_id_state {
142 DWC3_ID_GROUND = 0,
143 DWC3_ID_FLOAT,
144};
145
146/* for type c cable */
147enum plug_orientation {
148 ORIENTATION_NONE,
149 ORIENTATION_CC1,
150 ORIENTATION_CC2,
151};
152
Mayank Ranad339abe2017-05-31 09:19:49 -0700153enum msm_usb_irq {
154 HS_PHY_IRQ,
155 PWR_EVNT_IRQ,
156 DP_HS_PHY_IRQ,
157 DM_HS_PHY_IRQ,
158 SS_PHY_IRQ,
159 USB_MAX_IRQ
160};
161
162struct usb_irq {
163 char *name;
164 int irq;
165 bool enable;
166};
167
168static const struct usb_irq usb_irq_info[USB_MAX_IRQ] = {
169 {"hs_phy_irq", 0},
170 {"pwr_event_irq", 0},
171 {"dp_hs_phy_irq", 0},
172 {"dm_hs_phy_irq", 0},
173 {"ss_phy_irq", 0},
174};
175
Mayank Rana511f3b22016-08-02 12:00:11 -0700176/* Input bits to state machine (mdwc->inputs) */
177
178#define ID 0
179#define B_SESS_VLD 1
180#define B_SUSPEND 2
181
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +0530182#define PM_QOS_SAMPLE_SEC 2
183#define PM_QOS_THRESHOLD 400
184
Mayank Rana511f3b22016-08-02 12:00:11 -0700185struct dwc3_msm {
186 struct device *dev;
187 void __iomem *base;
188 void __iomem *ahb2phy_base;
189 struct platform_device *dwc3;
Jack Phambbe27962017-03-23 18:42:26 -0700190 struct dma_iommu_mapping *iommu_map;
Mayank Rana511f3b22016-08-02 12:00:11 -0700191 const struct usb_ep_ops *original_ep_ops[DWC3_ENDPOINTS_NUM];
192 struct list_head req_complete_list;
193 struct clk *xo_clk;
194 struct clk *core_clk;
195 long core_clk_rate;
Hemant Kumar8e4c2f22017-01-24 18:13:07 -0800196 long core_clk_rate_hs;
Mayank Rana511f3b22016-08-02 12:00:11 -0700197 struct clk *iface_clk;
198 struct clk *sleep_clk;
199 struct clk *utmi_clk;
200 unsigned int utmi_clk_rate;
201 struct clk *utmi_clk_src;
202 struct clk *bus_aggr_clk;
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +0530203 struct clk *noc_aggr_clk;
Mayank Rana511f3b22016-08-02 12:00:11 -0700204 struct clk *cfg_ahb_clk;
Amit Nischal4d278212016-06-06 17:54:34 +0530205 struct reset_control *core_reset;
Mayank Rana511f3b22016-08-02 12:00:11 -0700206 struct regulator *dwc3_gdsc;
207
208 struct usb_phy *hs_phy, *ss_phy;
209
210 struct dbm *dbm;
211
212 /* VBUS regulator for host mode */
213 struct regulator *vbus_reg;
214 int vbus_retry_count;
215 bool resume_pending;
216 atomic_t pm_suspended;
Mayank Ranad339abe2017-05-31 09:19:49 -0700217 struct usb_irq wakeup_irq[USB_MAX_IRQ];
Mayank Rana511f3b22016-08-02 12:00:11 -0700218 struct work_struct resume_work;
219 struct work_struct restart_usb_work;
220 bool in_restart;
221 struct workqueue_struct *dwc3_wq;
222 struct delayed_work sm_work;
223 unsigned long inputs;
224 unsigned int max_power;
225 bool charging_disabled;
226 enum usb_otg_state otg_state;
Mayank Rana511f3b22016-08-02 12:00:11 -0700227 u32 bus_perf_client;
228 struct msm_bus_scale_pdata *bus_scale_table;
229 struct power_supply *usb_psy;
Jack Pham4b8b4ae2016-08-09 11:36:34 -0700230 struct work_struct vbus_draw_work;
Mayank Rana511f3b22016-08-02 12:00:11 -0700231 bool in_host_mode;
Hemant Kumar8e4c2f22017-01-24 18:13:07 -0800232 enum usb_device_speed max_rh_port_speed;
Mayank Rana511f3b22016-08-02 12:00:11 -0700233 unsigned int tx_fifo_size;
234 bool vbus_active;
235 bool suspend;
236 bool disable_host_mode_pm;
Mayank Ranad339abe2017-05-31 09:19:49 -0700237 bool use_pdc_interrupts;
Mayank Rana511f3b22016-08-02 12:00:11 -0700238 enum dwc3_id_state id_state;
239 unsigned long lpm_flags;
240#define MDWC3_SS_PHY_SUSPEND BIT(0)
241#define MDWC3_ASYNC_IRQ_WAKE_CAPABILITY BIT(1)
242#define MDWC3_POWER_COLLAPSE BIT(2)
243
244 unsigned int irq_to_affin;
245 struct notifier_block dwc3_cpu_notifier;
Manu Gautam976fdfc2016-08-18 09:27:35 +0530246 struct notifier_block usbdev_nb;
247 bool hc_died;
Pratham Pratapd76a1782017-11-14 20:50:31 +0530248 /* for usb connector either type-C or microAB */
249 bool type_c;
250 /* whether to vote for VBUS reg in host mode */
251 bool no_vbus_vote_type_c;
Mayank Rana511f3b22016-08-02 12:00:11 -0700252
253 struct extcon_dev *extcon_vbus;
254 struct extcon_dev *extcon_id;
Mayank Rana51958172017-02-28 14:49:21 -0800255 struct extcon_dev *extcon_eud;
Mayank Rana511f3b22016-08-02 12:00:11 -0700256 struct notifier_block vbus_nb;
257 struct notifier_block id_nb;
Mayank Rana51958172017-02-28 14:49:21 -0800258 struct notifier_block eud_event_nb;
Mayank Rana54d60432017-07-18 12:10:04 -0700259 struct notifier_block host_restart_nb;
Mayank Rana511f3b22016-08-02 12:00:11 -0700260
Jack Pham4d4e9342016-12-07 19:25:02 -0800261 struct notifier_block host_nb;
262
Mayank Rana511f3b22016-08-02 12:00:11 -0700263 atomic_t in_p3;
264 unsigned int lpm_to_suspend_delay;
265 bool init;
266 enum plug_orientation typec_orientation;
Mayank Ranaf4918d32016-12-15 13:35:55 -0800267 u32 num_gsi_event_buffers;
268 struct dwc3_event_buffer **gsi_ev_buff;
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +0530269 int pm_qos_latency;
270 struct pm_qos_request pm_qos_req_dma;
271 struct delayed_work perf_vote_work;
Hemant Kumar006fae42017-07-12 18:11:25 -0700272 struct delayed_work sdp_check;
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +0530273 struct mutex suspend_resume_mutex;
Mayank Rana511f3b22016-08-02 12:00:11 -0700274};
275
276#define USB_HSPHY_3P3_VOL_MIN 3050000 /* uV */
277#define USB_HSPHY_3P3_VOL_MAX 3300000 /* uV */
278#define USB_HSPHY_3P3_HPM_LOAD 16000 /* uA */
279
280#define USB_HSPHY_1P8_VOL_MIN 1800000 /* uV */
281#define USB_HSPHY_1P8_VOL_MAX 1800000 /* uV */
282#define USB_HSPHY_1P8_HPM_LOAD 19000 /* uA */
283
284#define USB_SSPHY_1P8_VOL_MIN 1800000 /* uV */
285#define USB_SSPHY_1P8_VOL_MAX 1800000 /* uV */
286#define USB_SSPHY_1P8_HPM_LOAD 23000 /* uA */
287
288#define DSTS_CONNECTSPD_SS 0x4
289
290
291static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc);
292static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA);
Mayank Ranaf4918d32016-12-15 13:35:55 -0800293static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event);
Mayank Rana54d60432017-07-18 12:10:04 -0700294static int dwc3_restart_usb_host_mode(struct notifier_block *nb,
295 unsigned long event, void *ptr);
Mayank Ranaf70d8212017-06-12 14:02:07 -0700296
297static inline bool is_valid_usb_speed(struct dwc3 *dwc, int speed)
298{
299
300 return (((speed == USB_SPEED_FULL) || (speed == USB_SPEED_HIGH) ||
301 (speed == USB_SPEED_SUPER) || (speed == USB_SPEED_SUPER_PLUS))
302 && (speed <= dwc->maximum_speed));
303}
304
Mayank Rana511f3b22016-08-02 12:00:11 -0700305/**
306 *
307 * Read register with debug info.
308 *
309 * @base - DWC3 base virtual address.
310 * @offset - register offset.
311 *
312 * @return u32
313 */
Stephen Boyda247bae2017-06-15 14:09:08 -0700314static inline u32 dwc3_msm_read_reg(void __iomem *base, u32 offset)
Mayank Rana511f3b22016-08-02 12:00:11 -0700315{
316 u32 val = ioread32(base + offset);
317 return val;
318}
319
320/**
321 * Read register masked field with debug info.
322 *
323 * @base - DWC3 base virtual address.
324 * @offset - register offset.
325 * @mask - register bitmask.
326 *
327 * @return u32
328 */
Stephen Boyda247bae2017-06-15 14:09:08 -0700329static inline u32 dwc3_msm_read_reg_field(void __iomem *base,
Mayank Rana511f3b22016-08-02 12:00:11 -0700330 u32 offset,
331 const u32 mask)
332{
Mayank Ranad796cab2017-07-11 15:34:12 -0700333 u32 shift = __ffs(mask);
Mayank Rana511f3b22016-08-02 12:00:11 -0700334 u32 val = ioread32(base + offset);
335
336 val &= mask; /* clear other bits */
337 val >>= shift;
338 return val;
339}
340
341/**
342 *
343 * Write register with debug info.
344 *
345 * @base - DWC3 base virtual address.
346 * @offset - register offset.
347 * @val - value to write.
348 *
349 */
Stephen Boyda247bae2017-06-15 14:09:08 -0700350static inline void dwc3_msm_write_reg(void __iomem *base, u32 offset, u32 val)
Mayank Rana511f3b22016-08-02 12:00:11 -0700351{
352 iowrite32(val, base + offset);
353}
354
355/**
356 * Write register masked field with debug info.
357 *
358 * @base - DWC3 base virtual address.
359 * @offset - register offset.
360 * @mask - register bitmask.
361 * @val - value to write.
362 *
363 */
Stephen Boyda247bae2017-06-15 14:09:08 -0700364static inline void dwc3_msm_write_reg_field(void __iomem *base, u32 offset,
Mayank Rana511f3b22016-08-02 12:00:11 -0700365 const u32 mask, u32 val)
366{
Mayank Ranad796cab2017-07-11 15:34:12 -0700367 u32 shift = __ffs(mask);
Mayank Rana511f3b22016-08-02 12:00:11 -0700368 u32 tmp = ioread32(base + offset);
369
370 tmp &= ~mask; /* clear written bits */
371 val = tmp | (val << shift);
372 iowrite32(val, base + offset);
373}
374
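/*
 * Illustrative sketch (not part of the original file): the helpers above
 * implement a plain read-modify-write on a masked register field, shifting
 * the value into place by __ffs(mask). For example, enabling the GSI clock
 * bit in the QSCRATCH wrapper, as gsi_enable() does later in this file,
 * reduces to:
 *
 *	dwc3_msm_write_reg_field(mdwc->base, GSI_GENERAL_CFG_REG,
 *				 GSI_CLK_EN_MASK, 1);
 */
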
375/**
376 * Write register and read back masked value to confirm it is written
377 *
378 * @base - DWC3 base virtual address.
379 * @offset - register offset.
380 * @mask - register bitmask specifying what should be updated
381 * @val - value to write.
382 *
383 */
Stephen Boyda247bae2017-06-15 14:09:08 -0700384static inline void dwc3_msm_write_readback(void __iomem *base, u32 offset,
Mayank Rana511f3b22016-08-02 12:00:11 -0700385 const u32 mask, u32 val)
386{
387 u32 write_val, tmp = ioread32(base + offset);
388
389 tmp &= ~mask; /* retain other bits */
390 write_val = tmp | val;
391
392 iowrite32(write_val, base + offset);
393
394 /* Read back to see if val was written */
395 tmp = ioread32(base + offset);
396 tmp &= mask; /* clear other bits */
397
398 if (tmp != val)
399 pr_err("%s: write: %x to QSCRATCH: %x FAILED\n",
400 __func__, val, offset);
401}
402
Hemant Kumar8e4c2f22017-01-24 18:13:07 -0800403static bool dwc3_msm_is_ss_rhport_connected(struct dwc3_msm *mdwc)
404{
405 int i, num_ports;
406 u32 reg;
407
408 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
409 num_ports = HCS_MAX_PORTS(reg);
410
411 for (i = 0; i < num_ports; i++) {
412 reg = dwc3_msm_read_reg(mdwc->base, USB3_PORTSC + i*0x10);
413 if ((reg & PORT_CONNECT) && DEV_SUPERSPEED(reg))
414 return true;
415 }
416
417 return false;
418}
419
Mayank Rana511f3b22016-08-02 12:00:11 -0700420static bool dwc3_msm_is_host_superspeed(struct dwc3_msm *mdwc)
421{
422 int i, num_ports;
423 u32 reg;
424
425 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
426 num_ports = HCS_MAX_PORTS(reg);
427
428 for (i = 0; i < num_ports; i++) {
429 reg = dwc3_msm_read_reg(mdwc->base, USB3_PORTSC + i*0x10);
430 if ((reg & PORT_PE) && DEV_SUPERSPEED(reg))
431 return true;
432 }
433
434 return false;
435}
436
437static inline bool dwc3_msm_is_dev_superspeed(struct dwc3_msm *mdwc)
438{
439 u8 speed;
440
441 speed = dwc3_msm_read_reg(mdwc->base, DWC3_DSTS) & DWC3_DSTS_CONNECTSPD;
442 return !!(speed & DSTS_CONNECTSPD_SS);
443}
444
445static inline bool dwc3_msm_is_superspeed(struct dwc3_msm *mdwc)
446{
447 if (mdwc->in_host_mode)
448 return dwc3_msm_is_host_superspeed(mdwc);
449
450 return dwc3_msm_is_dev_superspeed(mdwc);
451}
452
453#if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
454/**
455 * Configure the DBM with the BAM's data fifo.
456 * This function is called by the USB BAM Driver
457 * upon initialization.
458 *
459 * @ep - pointer to usb endpoint.
460 * @addr - address of data fifo.
461 * @size - size of data fifo.
462 *
463 */
464int msm_data_fifo_config(struct usb_ep *ep, phys_addr_t addr,
465 u32 size, u8 dst_pipe_idx)
466{
467 struct dwc3_ep *dep = to_dwc3_ep(ep);
468 struct dwc3 *dwc = dep->dwc;
469 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
470
471 dev_dbg(mdwc->dev, "%s\n", __func__);
472
473 return dbm_data_fifo_config(mdwc->dbm, dep->number, addr, size,
474 dst_pipe_idx);
475}
476
477
478/**
479* Cleanups for msm endpoint on request complete.
480*
481* Also call original request complete.
482*
483* @usb_ep - pointer to usb_ep instance.
484* @request - pointer to usb_request instance.
485*
487*/
488static void dwc3_msm_req_complete_func(struct usb_ep *ep,
489 struct usb_request *request)
490{
491 struct dwc3_ep *dep = to_dwc3_ep(ep);
492 struct dwc3 *dwc = dep->dwc;
493 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
494 struct dwc3_msm_req_complete *req_complete = NULL;
495
496 /* Find original request complete function and remove it from list */
497 list_for_each_entry(req_complete, &mdwc->req_complete_list, list_item) {
498 if (req_complete->req == request)
499 break;
500 }
501 if (!req_complete || req_complete->req != request) {
502 dev_err(dep->dwc->dev, "%s: could not find the request\n",
503 __func__);
504 return;
505 }
506 list_del(&req_complete->list_item);
507
508 /*
509 * Release one more TRB to the pool since the DBM queue took 2 TRBs
510 * (normal and link), and the dwc3/gadget.c :: dwc3_gadget_giveback
511 * released only one.
512 */
Mayank Rana83ad5822016-08-09 14:17:22 -0700513 dep->trb_dequeue++;
Mayank Rana511f3b22016-08-02 12:00:11 -0700514
515 /* Unconfigure dbm ep */
516 dbm_ep_unconfig(mdwc->dbm, dep->number);
517
518 /*
519 * If this is the last endpoint we unconfigured, then also reset
520 * the event buffers; unless unconfiguring the ep due to lpm,
521 * in which case the event buffer only gets reset during the
522 * block reset.
523 */
524 if (dbm_get_num_of_eps_configured(mdwc->dbm) == 0 &&
525 !dbm_reset_ep_after_lpm(mdwc->dbm))
526 dbm_event_buffer_config(mdwc->dbm, 0, 0, 0);
527
528 /*
529 * Call original complete function, notice that dwc->lock is already
530 * taken by the caller of this function (dwc3_gadget_giveback()).
531 */
532 request->complete = req_complete->orig_complete;
533 if (request->complete)
534 request->complete(ep, request);
535
536 kfree(req_complete);
537}
538
539
540/**
541* Helper function
542*
543* Reset DBM endpoint.
544*
545* @mdwc - pointer to dwc3_msm instance.
546* @dep - pointer to dwc3_ep instance.
547*
548* @return int - 0 on success, negative on error.
549*/
550static int __dwc3_msm_dbm_ep_reset(struct dwc3_msm *mdwc, struct dwc3_ep *dep)
551{
552 int ret;
553
554 dev_dbg(mdwc->dev, "Resetting dbm endpoint %d\n", dep->number);
555
556 /* Reset the dbm endpoint */
557 ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, true);
558 if (ret) {
559 dev_err(mdwc->dev, "%s: failed to assert dbm ep reset\n",
560 __func__);
561 return ret;
562 }
563
564 /*
565 * The necessary delay between asserting and deasserting the dbm ep
566 * reset is based on the number of active endpoints. If there is more
567 * than one endpoint, a 1 msec delay is required. Otherwise, a shorter
568 * delay will suffice.
569 */
570 if (dbm_get_num_of_eps_configured(mdwc->dbm) > 1)
571 usleep_range(1000, 1200);
572 else
573 udelay(10);
574 ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, false);
575 if (ret) {
576 dev_err(mdwc->dev, "%s: failed to deassert dbm ep reset\n",
577 __func__);
578 return ret;
579 }
580
581 return 0;
582}
583
584/**
585* Reset the DBM endpoint which is linked to the given USB endpoint.
586*
587* @usb_ep - pointer to usb_ep instance.
588*
589* @return int - 0 on success, negative on error.
590*/
591
592int msm_dwc3_reset_dbm_ep(struct usb_ep *ep)
593{
594 struct dwc3_ep *dep = to_dwc3_ep(ep);
595 struct dwc3 *dwc = dep->dwc;
596 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
597
598 return __dwc3_msm_dbm_ep_reset(mdwc, dep);
599}
600EXPORT_SYMBOL(msm_dwc3_reset_dbm_ep);
601
602
603/**
604* Helper function.
605* See the header of the dwc3_msm_ep_queue function.
606*
607* @dwc3_ep - pointer to dwc3_ep instance.
608* @req - pointer to dwc3_request instance.
609*
610* @return int - 0 on success, negative on error.
611*/
612static int __dwc3_msm_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
613{
614 struct dwc3_trb *trb;
615 struct dwc3_trb *trb_link;
616 struct dwc3_gadget_ep_cmd_params params;
617 u32 cmd;
618 int ret = 0;
619
Mayank Rana83ad5822016-08-09 14:17:22 -0700620 /* We push the request to the dep->started_list to indicate that
Mayank Rana511f3b22016-08-02 12:00:11 -0700621 * this request is issued with start transfer. The request will be
622 * removed from this list in 2 cases. The first is that the transfer
623 * is completed (not if the transfer is endless, using circular TRBs
624 * with a link TRB). The second case is a stop transfer, which can
625 * be initiated by the function driver when calling dequeue.
626 */
Mayank Rana83ad5822016-08-09 14:17:22 -0700627 req->started = true;
628 list_add_tail(&req->list, &dep->started_list);
Mayank Rana511f3b22016-08-02 12:00:11 -0700629
630 /* First, prepare a normal TRB pointing to the fake buffer */
Mayank Rana9ca186c2017-06-19 17:57:21 -0700631 trb = &dep->trb_pool[dep->trb_enqueue];
632 dwc3_ep_inc_enq(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700633 memset(trb, 0, sizeof(*trb));
634
635 req->trb = trb;
636 trb->bph = DBM_TRB_BIT | DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
637 trb->size = DWC3_TRB_SIZE_LENGTH(req->request.length);
638 trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_HWO |
639 DWC3_TRB_CTRL_CHN | (req->direction ? 0 : DWC3_TRB_CTRL_CSP);
640 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
641
642 /* Second, prepare a Link TRB that points to the first TRB */
Mayank Rana9ca186c2017-06-19 17:57:21 -0700643 trb_link = &dep->trb_pool[dep->trb_enqueue];
644 dwc3_ep_inc_enq(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700645 memset(trb_link, 0, sizeof(*trb_link));
646
647 trb_link->bpl = lower_32_bits(req->trb_dma);
648 trb_link->bph = DBM_TRB_BIT |
649 DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
650 trb_link->size = 0;
651 trb_link->ctrl = DWC3_TRBCTL_LINK_TRB | DWC3_TRB_CTRL_HWO;
652
653 /*
654 * Now start the transfer
655 */
656 memset(&params, 0, sizeof(params));
657 params.param0 = 0; /* TDAddr High */
658 params.param1 = lower_32_bits(req->trb_dma); /* DAddr Low */
659
660 /* DBM requires IOC to be set */
661 cmd = DWC3_DEPCMD_STARTTRANSFER | DWC3_DEPCMD_CMDIOC;
Mayank Rana83ad5822016-08-09 14:17:22 -0700662 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -0700663 if (ret < 0) {
664 dev_dbg(dep->dwc->dev,
665 "%s: failed to send STARTTRANSFER command\n",
666 __func__);
667
668 list_del(&req->list);
669 return ret;
670 }
671 dep->flags |= DWC3_EP_BUSY;
Mayank Rana83ad5822016-08-09 14:17:22 -0700672 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700673
674 return ret;
675}
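
/*
 * Descriptive note: __dwc3_msm_ep_queue() above leaves the endpoint with
 * exactly two TRBs. The first is a NORMAL TRB whose buffer fields carry the
 * DBM_TRB_* encoding rather than a real buffer address, and the second is a
 * LINK TRB whose bpl points back at that NORMAL TRB, so the transfer can run
 * endlessly on this small circular ring while the DBM block handles the
 * actual data buffers.
 */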
676
677/**
678* Queue a usb request to the DBM endpoint.
679* This function should be called after the endpoint
680* was enabled by the ep_enable.
681*
682* This function prepares a special structure of TRBs which
683* is recognized by the DBM HW, so it will be possible to use
684* this endpoint in DBM mode.
685*
686* The TRBs prepared by this function are one normal TRB
687* which points to a fake buffer, followed by a link TRB
688* that points to the first TRB.
689*
690* The API of this function follows the regular API of
691* usb_ep_queue (see usb_ep_ops in include/linux/usb/gadget.h).
692*
693* @usb_ep - pointer to usb_ep instance.
694* @request - pointer to usb_request instance.
695* @gfp_flags - possible flags.
696*
697* @return int - 0 on success, negative on error.
698*/
699static int dwc3_msm_ep_queue(struct usb_ep *ep,
700 struct usb_request *request, gfp_t gfp_flags)
701{
702 struct dwc3_request *req = to_dwc3_request(request);
703 struct dwc3_ep *dep = to_dwc3_ep(ep);
704 struct dwc3 *dwc = dep->dwc;
705 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
706 struct dwc3_msm_req_complete *req_complete;
707 unsigned long flags;
708 int ret = 0, size;
709 u8 bam_pipe;
710 bool producer;
711 bool disable_wb;
712 bool internal_mem;
713 bool ioc;
714 bool superspeed;
715
716 if (!(request->udc_priv & MSM_SPS_MODE)) {
717 /* Not SPS mode, call original queue */
718 dev_vdbg(mdwc->dev, "%s: not sps mode, use regular queue\n",
719 __func__);
720
721 return (mdwc->original_ep_ops[dep->number])->queue(ep,
722 request,
723 gfp_flags);
724 }
725
726 /* HW restriction regarding TRB size (8KB) */
727 if (req->request.length < 0x2000) {
728 dev_err(mdwc->dev, "%s: Min TRB size is 8KB\n", __func__);
729 return -EINVAL;
730 }
731
732 /*
733 * Override req->complete function, but before doing that,
734 * store its original pointer in the req_complete_list.
735 */
736 req_complete = kzalloc(sizeof(*req_complete), gfp_flags);
737 if (!req_complete)
738 return -ENOMEM;
739
740 req_complete->req = request;
741 req_complete->orig_complete = request->complete;
742 list_add_tail(&req_complete->list_item, &mdwc->req_complete_list);
743 request->complete = dwc3_msm_req_complete_func;
744
745 /*
746 * Configure the DBM endpoint
747 */
748 bam_pipe = request->udc_priv & MSM_PIPE_ID_MASK;
749 producer = ((request->udc_priv & MSM_PRODUCER) ? true : false);
750 disable_wb = ((request->udc_priv & MSM_DISABLE_WB) ? true : false);
751 internal_mem = ((request->udc_priv & MSM_INTERNAL_MEM) ? true : false);
752 ioc = ((request->udc_priv & MSM_ETD_IOC) ? true : false);
753
754 ret = dbm_ep_config(mdwc->dbm, dep->number, bam_pipe, producer,
755 disable_wb, internal_mem, ioc);
756 if (ret < 0) {
757 dev_err(mdwc->dev,
758 "error %d after calling dbm_ep_config\n", ret);
759 return ret;
760 }
761
762 dev_vdbg(dwc->dev, "%s: queuing request %p to ep %s length %d\n",
763 __func__, request, ep->name, request->length);
764 size = dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTSIZ(0));
765 dbm_event_buffer_config(mdwc->dbm,
766 dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRLO(0)),
767 dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRHI(0)),
768 DWC3_GEVNTSIZ_SIZE(size));
769
770 /*
771 * We must obtain the lock of the dwc3 core driver,
772 * including disabling interrupts, so we can be sure that
773 * we are the only ones configuring the HW device core, and
774 * ensure that queuing the request finishes as soon as
775 * possible so that the lock is released quickly.
776 */
777 spin_lock_irqsave(&dwc->lock, flags);
778 if (!dep->endpoint.desc) {
779 dev_err(mdwc->dev,
780 "%s: trying to queue request %p to disabled ep %s\n",
781 __func__, request, ep->name);
782 ret = -EPERM;
783 goto err;
784 }
785
786 if (dep->number == 0 || dep->number == 1) {
787 dev_err(mdwc->dev,
788 "%s: trying to queue dbm request %p to control ep %s\n",
789 __func__, request, ep->name);
790 ret = -EPERM;
791 goto err;
792 }
793
794
Mayank Rana83ad5822016-08-09 14:17:22 -0700795 if (dep->trb_dequeue != dep->trb_enqueue ||
796 !list_empty(&dep->pending_list)
797 || !list_empty(&dep->started_list)) {
Mayank Rana511f3b22016-08-02 12:00:11 -0700798 dev_err(mdwc->dev,
799 "%s: trying to queue dbm request %p to ep %s\n",
800 __func__, request, ep->name);
801 ret = -EPERM;
802 goto err;
803 } else {
Mayank Rana83ad5822016-08-09 14:17:22 -0700804 dep->trb_dequeue = 0;
805 dep->trb_enqueue = 0;
Mayank Rana511f3b22016-08-02 12:00:11 -0700806 }
807
808 ret = __dwc3_msm_ep_queue(dep, req);
809 if (ret < 0) {
810 dev_err(mdwc->dev,
811 "error %d after calling __dwc3_msm_ep_queue\n", ret);
812 goto err;
813 }
814
815 spin_unlock_irqrestore(&dwc->lock, flags);
816 superspeed = dwc3_msm_is_dev_superspeed(mdwc);
817 dbm_set_speed(mdwc->dbm, (u8)superspeed);
818
819 return 0;
820
821err:
822 spin_unlock_irqrestore(&dwc->lock, flags);
823 kfree(req_complete);
824 return ret;
825}
826
827/*
828* Returns XferRscIndex for the EP. This is stored at StartXfer GSI EP OP
829*
830* @usb_ep - pointer to usb_ep instance.
831*
832* @return int - XferRscIndex
833*/
834static inline int gsi_get_xfer_index(struct usb_ep *ep)
835{
836 struct dwc3_ep *dep = to_dwc3_ep(ep);
837
838 return dep->resource_index;
839}
840
841/*
842* Fills up the GSI channel information needed in call to IPA driver
843* for GSI channel creation.
844*
845* @usb_ep - pointer to usb_ep instance.
846* @ch_info - output parameter with requested channel info
847*/
848static void gsi_get_channel_info(struct usb_ep *ep,
849 struct gsi_channel_info *ch_info)
850{
851 struct dwc3_ep *dep = to_dwc3_ep(ep);
852 int last_trb_index = 0;
853 struct dwc3 *dwc = dep->dwc;
854 struct usb_gsi_request *request = ch_info->ch_req;
855
856 /* Provide physical USB addresses for DEPCMD and GEVENTCNT registers */
857 ch_info->depcmd_low_addr = (u32)(dwc->reg_phys +
Mayank Ranaac776d12017-04-18 16:56:13 -0700858 DWC3_DEP_BASE(dep->number) + DWC3_DEPCMD);
859
Mayank Rana511f3b22016-08-02 12:00:11 -0700860 ch_info->depcmd_hi_addr = 0;
861
862 ch_info->xfer_ring_base_addr = dwc3_trb_dma_offset(dep,
863 &dep->trb_pool[0]);
864 /* Convert to a multiple of 1KB */
865 ch_info->const_buffer_size = request->buf_len/1024;
866
867 /* IN direction */
868 if (dep->direction) {
869 /*
870 * Multiply by size of each TRB for xfer_ring_len in bytes.
871 * 2n + 2 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
872 * extra Xfer TRB followed by n ZLP TRBs + 1 LINK TRB.
873 */
874 ch_info->xfer_ring_len = (2 * request->num_bufs + 2) * 0x10;
875 last_trb_index = 2 * request->num_bufs + 2;
876 } else { /* OUT direction */
877 /*
878 * Multiply by size of each TRB for xfer_ring_len in bytes.
879 * n + 2 TRBs as per GSI h/w requirement. 1 LINK TRB at the
880 * start, n Xfer TRBs, and 1 LINK TRB at the end.
881 */
Mayank Rana64d136b2016-11-01 21:01:34 -0700882 ch_info->xfer_ring_len = (request->num_bufs + 2) * 0x10;
883 last_trb_index = request->num_bufs + 2;
Mayank Rana511f3b22016-08-02 12:00:11 -0700884 }
885
886 /* Store last 16 bits of LINK TRB address as per GSI hw requirement */
887 ch_info->last_trb_addr = (dwc3_trb_dma_offset(dep,
888 &dep->trb_pool[last_trb_index - 1]) & 0x0000FFFF);
889 ch_info->gevntcount_low_addr = (u32)(dwc->reg_phys +
890 DWC3_GEVNTCOUNT(ep->ep_intr_num));
891 ch_info->gevntcount_hi_addr = 0;
892
893 dev_dbg(dwc->dev,
894 "depcmd_laddr=%x last_trb_addr=%x gevtcnt_laddr=%x gevtcnt_haddr=%x",
895 ch_info->depcmd_low_addr, ch_info->last_trb_addr,
896 ch_info->gevntcount_low_addr, ch_info->gevntcount_hi_addr);
897}
898
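/*
 * Worked example for the ring length math above (illustrative numbers only):
 * each TRB is 16 (0x10) bytes, so with request->num_bufs = 8 the IN ring is
 * (2 * 8 + 2) * 0x10 = 288 bytes and the OUT ring is (8 + 2) * 0x10 = 160
 * bytes, which is what gets reported to the IPA driver in xfer_ring_len.
 */
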
899/*
900* Perform StartXfer on GSI EP. Stores XferRscIndex.
901*
902* @usb_ep - pointer to usb_ep instance.
903*
904* @return int - 0 on success
905*/
906static int gsi_startxfer_for_ep(struct usb_ep *ep)
907{
908 int ret;
909 struct dwc3_gadget_ep_cmd_params params;
910 u32 cmd;
911 struct dwc3_ep *dep = to_dwc3_ep(ep);
912 struct dwc3 *dwc = dep->dwc;
913
914 memset(&params, 0, sizeof(params));
915 params.param0 = GSI_TRB_ADDR_BIT_53_MASK | GSI_TRB_ADDR_BIT_55_MASK;
916 params.param0 |= (ep->ep_intr_num << 16);
917 params.param1 = lower_32_bits(dwc3_trb_dma_offset(dep,
918 &dep->trb_pool[0]));
919 cmd = DWC3_DEPCMD_STARTTRANSFER;
920 cmd |= DWC3_DEPCMD_PARAM(0);
Mayank Rana83ad5822016-08-09 14:17:22 -0700921 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -0700922
923 if (ret < 0)
924 dev_dbg(dwc->dev, "Fail StrtXfr on GSI EP#%d\n", dep->number);
Mayank Rana83ad5822016-08-09 14:17:22 -0700925 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700926 dev_dbg(dwc->dev, "XferRsc = %x", dep->resource_index);
927 return ret;
928}
929
930/*
931* Store Ring Base and Doorbell Address for GSI EP
932* for GSI channel creation.
933*
934* @usb_ep - pointer to usb_ep instance.
935* @dbl_addr - Doorbell address obtained from IPA driver
936*/
937static void gsi_store_ringbase_dbl_info(struct usb_ep *ep, u32 dbl_addr)
938{
939 struct dwc3_ep *dep = to_dwc3_ep(ep);
940 struct dwc3 *dwc = dep->dwc;
941 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
942 int n = ep->ep_intr_num - 1;
943
944 dwc3_msm_write_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n),
945 dwc3_trb_dma_offset(dep, &dep->trb_pool[0]));
946 dwc3_msm_write_reg(mdwc->base, GSI_DBL_ADDR_L(n), dbl_addr);
947
948 dev_dbg(mdwc->dev, "Ring Base Addr %d = %x", n,
949 dwc3_msm_read_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n)));
950 dev_dbg(mdwc->dev, "GSI DB Addr %d = %x", n,
951 dwc3_msm_read_reg(mdwc->base, GSI_DBL_ADDR_L(n)));
952}
953
954/*
Mayank Rana64d136b2016-11-01 21:01:34 -0700955* Rings Doorbell for GSI Channel
Mayank Rana511f3b22016-08-02 12:00:11 -0700956*
957* @usb_ep - pointer to usb_ep instance.
958* @request - pointer to GSI request. This is used to pass in the
959* address of the GSI doorbell obtained from IPA driver
960*/
Mayank Rana64d136b2016-11-01 21:01:34 -0700961static void gsi_ring_db(struct usb_ep *ep, struct usb_gsi_request *request)
Mayank Rana511f3b22016-08-02 12:00:11 -0700962{
963 void __iomem *gsi_dbl_address_lsb;
964 void __iomem *gsi_dbl_address_msb;
965 dma_addr_t offset;
966 u64 dbl_addr = *((u64 *)request->buf_base_addr);
967 u32 dbl_lo_addr = (dbl_addr & 0xFFFFFFFF);
968 u32 dbl_hi_addr = (dbl_addr >> 32);
Mayank Rana511f3b22016-08-02 12:00:11 -0700969 struct dwc3_ep *dep = to_dwc3_ep(ep);
970 struct dwc3 *dwc = dep->dwc;
971 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
Mayank Rana64d136b2016-11-01 21:01:34 -0700972 int num_trbs = (dep->direction) ? (2 * (request->num_bufs) + 2)
973 : (request->num_bufs + 2);
Mayank Rana511f3b22016-08-02 12:00:11 -0700974
975 gsi_dbl_address_lsb = devm_ioremap_nocache(mdwc->dev,
976 dbl_lo_addr, sizeof(u32));
977 if (!gsi_dbl_address_lsb)
978 dev_dbg(mdwc->dev, "Failed to get GSI DBL address LSB\n");
979
980 gsi_dbl_address_msb = devm_ioremap_nocache(mdwc->dev,
981 dbl_hi_addr, sizeof(u32));
982 if (!gsi_dbl_address_msb)
983 dev_dbg(mdwc->dev, "Failed to get GSI DBL address MSB\n");
984
985 offset = dwc3_trb_dma_offset(dep, &dep->trb_pool[num_trbs-1]);
Mayank Rana64d136b2016-11-01 21:01:34 -0700986 dev_dbg(mdwc->dev, "Writing link TRB addr: %pa to %p (%x) for ep:%s\n",
987 &offset, gsi_dbl_address_lsb, dbl_lo_addr, ep->name);
Mayank Rana511f3b22016-08-02 12:00:11 -0700988
989 writel_relaxed(offset, gsi_dbl_address_lsb);
990 writel_relaxed(0, gsi_dbl_address_msb);
991}
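
/*
 * Descriptive note: "ringing" the doorbell here means writing the DMA offset
 * of the ring's terminating LINK TRB into the doorbell register whose address
 * the IPA driver passed in via request->buf_base_addr, which publishes the
 * prepared TRBs to the GSI hardware.
 */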
992
993/*
994* Sets HWO bit for TRBs and performs UpdateXfer for OUT EP.
995*
996* @usb_ep - pointer to usb_ep instance.
997* @request - pointer to GSI request. Used to determine num of TRBs for OUT EP.
998*
999* @return int - 0 on success
1000*/
1001static int gsi_updatexfer_for_ep(struct usb_ep *ep,
1002 struct usb_gsi_request *request)
1003{
1004 int i;
1005 int ret;
1006 u32 cmd;
1007 int num_trbs = request->num_bufs + 1;
1008 struct dwc3_trb *trb;
1009 struct dwc3_gadget_ep_cmd_params params;
1010 struct dwc3_ep *dep = to_dwc3_ep(ep);
1011 struct dwc3 *dwc = dep->dwc;
1012
1013 for (i = 0; i < num_trbs - 1; i++) {
1014 trb = &dep->trb_pool[i];
1015 trb->ctrl |= DWC3_TRB_CTRL_HWO;
1016 }
1017
1018 memset(&params, 0, sizeof(params));
1019 cmd = DWC3_DEPCMD_UPDATETRANSFER;
1020 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
Mayank Rana83ad5822016-08-09 14:17:22 -07001021 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -07001022 dep->flags |= DWC3_EP_BUSY;
1023 if (ret < 0)
1024 dev_dbg(dwc->dev, "UpdateXfr fail on GSI EP#%d\n", dep->number);
1025 return ret;
1026}
1027
1028/*
1029* Perform EndXfer on particular GSI EP.
1030*
1031* @usb_ep - pointer to usb_ep instance.
1032*/
1033static void gsi_endxfer_for_ep(struct usb_ep *ep)
1034{
1035 struct dwc3_ep *dep = to_dwc3_ep(ep);
1036 struct dwc3 *dwc = dep->dwc;
1037
1038 dwc3_stop_active_transfer(dwc, dep->number, true);
1039}
1040
1041/*
1042* Allocates and configures TRBs for GSI EPs.
1043*
1044* @usb_ep - pointer to usb_ep instance.
1045* @request - pointer to GSI request.
1046*
1047* @return int - 0 on success
1048*/
1049static int gsi_prepare_trbs(struct usb_ep *ep, struct usb_gsi_request *req)
1050{
1051 int i = 0;
1052 dma_addr_t buffer_addr = req->dma;
1053 struct dwc3_ep *dep = to_dwc3_ep(ep);
1054 struct dwc3 *dwc = dep->dwc;
1055 struct dwc3_trb *trb;
1056 int num_trbs = (dep->direction) ? (2 * (req->num_bufs) + 2)
Mayank Rana64d136b2016-11-01 21:01:34 -07001057 : (req->num_bufs + 2);
Mayank Rana511f3b22016-08-02 12:00:11 -07001058
Mayank Ranae0a427e2017-09-18 16:56:26 -07001059 dep->trb_pool = dma_zalloc_coherent(dwc->sysdev,
1060 num_trbs * sizeof(struct dwc3_trb),
1061 &dep->trb_pool_dma, GFP_KERNEL);
1062
1063 if (!dep->trb_pool) {
Mayank Rana511f3b22016-08-02 12:00:11 -07001064 dev_err(dep->dwc->dev, "failed to alloc trb dma pool for %s\n",
1065 dep->name);
1066 return -ENOMEM;
1067 }
1068
1069 dep->num_trbs = num_trbs;
Mayank Rana511f3b22016-08-02 12:00:11 -07001070 /* IN direction */
1071 if (dep->direction) {
1072 for (i = 0; i < num_trbs ; i++) {
1073 trb = &dep->trb_pool[i];
1074 memset(trb, 0, sizeof(*trb));
1075 /* Set up first n+1 TRBs for ZLPs */
1076 if (i < (req->num_bufs + 1)) {
1077 trb->bpl = 0;
1078 trb->bph = 0;
1079 trb->size = 0;
1080 trb->ctrl = DWC3_TRBCTL_NORMAL
1081 | DWC3_TRB_CTRL_IOC;
1082 continue;
1083 }
1084
1085 /* Setup n TRBs pointing to valid buffers */
1086 trb->bpl = lower_32_bits(buffer_addr);
1087 trb->bph = 0;
1088 trb->size = 0;
1089 trb->ctrl = DWC3_TRBCTL_NORMAL
1090 | DWC3_TRB_CTRL_IOC;
1091 buffer_addr += req->buf_len;
1092
1093 /* Set up the Link TRB at the end */
1094 if (i == (num_trbs - 1)) {
1095 trb->bpl = dwc3_trb_dma_offset(dep,
1096 &dep->trb_pool[0]);
1097 trb->bph = (1 << 23) | (1 << 21)
1098 | (ep->ep_intr_num << 16);
1099 trb->size = 0;
1100 trb->ctrl = DWC3_TRBCTL_LINK_TRB
1101 | DWC3_TRB_CTRL_HWO;
1102 }
1103 }
1104 } else { /* OUT direction */
1105
1106 for (i = 0; i < num_trbs ; i++) {
1107
1108 trb = &dep->trb_pool[i];
1109 memset(trb, 0, sizeof(*trb));
Mayank Rana64d136b2016-11-01 21:01:34 -07001110 /* Setup LINK TRB to start with TRB ring */
1111 if (i == 0) {
Mayank Rana511f3b22016-08-02 12:00:11 -07001112 trb->bpl = dwc3_trb_dma_offset(dep,
Mayank Rana64d136b2016-11-01 21:01:34 -07001113 &dep->trb_pool[1]);
1114 trb->ctrl = DWC3_TRBCTL_LINK_TRB;
1115 } else if (i == (num_trbs - 1)) {
1116 /* Set up the Link TRB at the end */
1117 trb->bpl = dwc3_trb_dma_offset(dep,
1118 &dep->trb_pool[0]);
Mayank Rana511f3b22016-08-02 12:00:11 -07001119 trb->bph = (1 << 23) | (1 << 21)
1120 | (ep->ep_intr_num << 16);
Mayank Rana511f3b22016-08-02 12:00:11 -07001121 trb->ctrl = DWC3_TRBCTL_LINK_TRB
1122 | DWC3_TRB_CTRL_HWO;
Mayank Rana64d136b2016-11-01 21:01:34 -07001123 } else {
1124 trb->bpl = lower_32_bits(buffer_addr);
1125 trb->size = req->buf_len;
1126 buffer_addr += req->buf_len;
1127 trb->ctrl = DWC3_TRBCTL_NORMAL
1128 | DWC3_TRB_CTRL_IOC
1129 | DWC3_TRB_CTRL_CSP
1130 | DWC3_TRB_CTRL_ISP_IMI;
Mayank Rana511f3b22016-08-02 12:00:11 -07001131 }
1132 }
1133 }
Mayank Rana64d136b2016-11-01 21:01:34 -07001134
1135 pr_debug("%s: Initialized TRB Ring for %s\n", __func__, dep->name);
1136 trb = &dep->trb_pool[0];
1137 if (trb) {
1138 for (i = 0; i < num_trbs; i++) {
1139 pr_debug("TRB(%d): ADDRESS:%lx bpl:%x bph:%x size:%x ctrl:%x\n",
1140 i, (unsigned long)dwc3_trb_dma_offset(dep,
1141 &dep->trb_pool[i]), trb->bpl, trb->bph,
1142 trb->size, trb->ctrl);
1143 trb++;
1144 }
1145 }
1146
Mayank Rana511f3b22016-08-02 12:00:11 -07001147 return 0;
1148}
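
/*
 * Descriptive note on the rings built above: for IN endpoints the layout is
 * (n + 1) ZLP TRBs, then n buffer TRBs, then a LINK TRB back to trb_pool[0];
 * for OUT endpoints it is a LINK TRB to trb_pool[1], then n buffer TRBs, then
 * a LINK TRB back to trb_pool[0] (n == req->num_bufs in both cases).
 */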
1149
1150/*
1151* Frees TRBs for GSI EPs.
1152*
1153* @usb_ep - pointer to usb_ep instance.
1154*
1155*/
1156static void gsi_free_trbs(struct usb_ep *ep)
1157{
1158 struct dwc3_ep *dep = to_dwc3_ep(ep);
Mayank Ranae0a427e2017-09-18 16:56:26 -07001159 struct dwc3 *dwc = dep->dwc;
Mayank Rana511f3b22016-08-02 12:00:11 -07001160
1161 if (dep->endpoint.ep_type == EP_TYPE_NORMAL)
1162 return;
1163
1164 /* Free TRBs and TRB pool for EP */
Mayank Ranae0a427e2017-09-18 16:56:26 -07001165 if (dep->trb_pool_dma) {
1166 dma_free_coherent(dwc->sysdev,
1167 dep->num_trbs * sizeof(struct dwc3_trb),
1168 dep->trb_pool,
1169 dep->trb_pool_dma);
Mayank Rana511f3b22016-08-02 12:00:11 -07001170 dep->trb_pool = NULL;
1171 dep->trb_pool_dma = 0;
Mayank Rana511f3b22016-08-02 12:00:11 -07001172 }
1173}
1174/*
1175* Configures GSI EPs. For GSI EPs we need to set interrupter numbers.
1176*
1177* @usb_ep - pointer to usb_ep instance.
1178* @request - pointer to GSI request.
1179*/
1180static void gsi_configure_ep(struct usb_ep *ep, struct usb_gsi_request *request)
1181{
1182 struct dwc3_ep *dep = to_dwc3_ep(ep);
1183 struct dwc3 *dwc = dep->dwc;
1184 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1185 struct dwc3_gadget_ep_cmd_params params;
1186 const struct usb_endpoint_descriptor *desc = ep->desc;
1187 const struct usb_ss_ep_comp_descriptor *comp_desc = ep->comp_desc;
Mayank Ranaac1200c2017-04-25 13:48:46 -07001188 u32 reg;
1189 int ret;
Mayank Rana511f3b22016-08-02 12:00:11 -07001190
1191 memset(&params, 0x00, sizeof(params));
1192
1193 /* Configure GSI EP */
1194 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
1195 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
1196
1197 /* Burst size is only needed in SuperSpeed mode */
1198 if (dwc->gadget.speed == USB_SPEED_SUPER) {
1199 u32 burst = dep->endpoint.maxburst - 1;
1200
1201 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
1202 }
1203
1204 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
1205 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
1206 | DWC3_DEPCFG_STREAM_EVENT_EN;
1207 dep->stream_capable = true;
1208 }
1209
1210 /* Set EP number */
1211 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
1212
1213 /* Set interrupter number for GSI endpoints */
1214 params.param1 |= DWC3_DEPCFG_INT_NUM(ep->ep_intr_num);
1215
1216 /* Enable XferInProgress and XferComplete Interrupts */
1217 params.param1 |= DWC3_DEPCFG_XFER_COMPLETE_EN;
1218 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
1219 params.param1 |= DWC3_DEPCFG_FIFO_ERROR_EN;
1220 /*
1221 * We must use the lower 16 TX FIFOs even though
1222 * HW might have more
1223 */
1224 /* Remove FIFO Number for GSI EP*/
1225 if (dep->direction)
1226 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
1227
1228 params.param0 |= DWC3_DEPCFG_ACTION_INIT;
1229
1230 dev_dbg(mdwc->dev, "Set EP config to params = %x %x %x, for %s\n",
1231 params.param0, params.param1, params.param2, dep->name);
1232
Mayank Rana83ad5822016-08-09 14:17:22 -07001233 dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -07001234
1235 /* Set XferRsc Index for GSI EP */
1236 if (!(dep->flags & DWC3_EP_ENABLED)) {
Mayank Ranaac1200c2017-04-25 13:48:46 -07001237 ret = dwc3_gadget_resize_tx_fifos(dwc, dep);
1238 if (ret)
1239 return;
1240
Mayank Rana511f3b22016-08-02 12:00:11 -07001241 memset(&params, 0x00, sizeof(params));
1242 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
Mayank Rana83ad5822016-08-09 14:17:22 -07001243 dwc3_send_gadget_ep_cmd(dep,
Mayank Rana511f3b22016-08-02 12:00:11 -07001244 DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
1245
1246 dep->endpoint.desc = desc;
1247 dep->comp_desc = comp_desc;
1248 dep->type = usb_endpoint_type(desc);
1249 dep->flags |= DWC3_EP_ENABLED;
1250 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
1251 reg |= DWC3_DALEPENA_EP(dep->number);
1252 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
1253 }
1254
1255}
1256
1257/*
1258* Enables USB wrapper for GSI
1259*
1260* @usb_ep - pointer to usb_ep instance.
1261*/
1262static void gsi_enable(struct usb_ep *ep)
1263{
1264 struct dwc3_ep *dep = to_dwc3_ep(ep);
1265 struct dwc3 *dwc = dep->dwc;
1266 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1267
1268 dwc3_msm_write_reg_field(mdwc->base,
1269 GSI_GENERAL_CFG_REG, GSI_CLK_EN_MASK, 1);
1270 dwc3_msm_write_reg_field(mdwc->base,
1271 GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 1);
1272 dwc3_msm_write_reg_field(mdwc->base,
1273 GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 0);
1274 dev_dbg(mdwc->dev, "%s: Enable GSI\n", __func__);
1275 dwc3_msm_write_reg_field(mdwc->base,
1276 GSI_GENERAL_CFG_REG, GSI_EN_MASK, 1);
1277}
1278
1279/*
1280* Block or allow doorbell towards GSI
1281*
1282* @usb_ep - pointer to usb_ep instance.
1283* @block_db - true to block the doorbell towards GSI,
1284* false to allow it
1285*/
1286static void gsi_set_clear_dbell(struct usb_ep *ep,
1287 bool block_db)
1288{
1289
1290 struct dwc3_ep *dep = to_dwc3_ep(ep);
1291 struct dwc3 *dwc = dep->dwc;
1292 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1293
1294 dwc3_msm_write_reg_field(mdwc->base,
1295 GSI_GENERAL_CFG_REG, BLOCK_GSI_WR_GO_MASK, block_db);
1296}
1297
1298/*
1299* Performs necessary checks before stopping GSI channels
1300*
1301* @usb_ep - pointer to usb_ep instance to access DWC3 regs
1302*/
1303static bool gsi_check_ready_to_suspend(struct usb_ep *ep, bool f_suspend)
1304{
1305 u32 timeout = 1500;
1306 u32 reg = 0;
1307 struct dwc3_ep *dep = to_dwc3_ep(ep);
1308 struct dwc3 *dwc = dep->dwc;
1309 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1310
1311 while (dwc3_msm_read_reg_field(mdwc->base,
1312 GSI_IF_STS, GSI_WR_CTRL_STATE_MASK)) {
1313 if (!timeout--) {
1314 dev_err(mdwc->dev,
1315 "Unable to suspend GSI ch. WR_CTRL_STATE != 0\n");
1316 return false;
1317 }
1318 }
1319 /* Check for U3 only if we are not handling Function Suspend */
1320 if (!f_suspend) {
1321 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1322 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U3) {
1323 dev_err(mdwc->dev, "Unable to suspend GSI ch\n");
1324 return false;
1325 }
1326 }
1327
1328 return true;
1329}
1330
1331
1332/**
1333* Performs GSI operations or GSI EP related operations.
1334*
1335* @usb_ep - pointer to usb_ep instance.
1336* @op_data - pointer to opcode related data.
1337* @op - GSI related or GSI EP related op code.
1338*
1339* @return int - 0 on success, negative on error.
1340* Also returns XferRscIdx for GSI_EP_OP_GET_XFER_IDX.
1341*/
1342static int dwc3_msm_gsi_ep_op(struct usb_ep *ep,
1343 void *op_data, enum gsi_ep_op op)
1344{
1345 u32 ret = 0;
1346 struct dwc3_ep *dep = to_dwc3_ep(ep);
1347 struct dwc3 *dwc = dep->dwc;
1348 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1349 struct usb_gsi_request *request;
1350 struct gsi_channel_info *ch_info;
1351 bool block_db, f_suspend;
Mayank Rana8432c362016-09-30 18:41:17 -07001352 unsigned long flags;
Mayank Rana511f3b22016-08-02 12:00:11 -07001353
1354 switch (op) {
1355 case GSI_EP_OP_PREPARE_TRBS:
1356 request = (struct usb_gsi_request *)op_data;
1357 dev_dbg(mdwc->dev, "EP_OP_PREPARE_TRBS for %s\n", ep->name);
1358 ret = gsi_prepare_trbs(ep, request);
1359 break;
1360 case GSI_EP_OP_FREE_TRBS:
1361 dev_dbg(mdwc->dev, "EP_OP_FREE_TRBS for %s\n", ep->name);
1362 gsi_free_trbs(ep);
1363 break;
1364 case GSI_EP_OP_CONFIG:
1365 request = (struct usb_gsi_request *)op_data;
1366 dev_dbg(mdwc->dev, "EP_OP_CONFIG for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001367 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001368 gsi_configure_ep(ep, request);
Mayank Rana8432c362016-09-30 18:41:17 -07001369 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001370 break;
1371 case GSI_EP_OP_STARTXFER:
1372 dev_dbg(mdwc->dev, "EP_OP_STARTXFER for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001373 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001374 ret = gsi_startxfer_for_ep(ep);
Mayank Rana8432c362016-09-30 18:41:17 -07001375 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001376 break;
1377 case GSI_EP_OP_GET_XFER_IDX:
1378 dev_dbg(mdwc->dev, "EP_OP_GET_XFER_IDX for %s\n", ep->name);
1379 ret = gsi_get_xfer_index(ep);
1380 break;
1381 case GSI_EP_OP_STORE_DBL_INFO:
1382 dev_dbg(mdwc->dev, "EP_OP_STORE_DBL_INFO\n");
1383 gsi_store_ringbase_dbl_info(ep, *((u32 *)op_data));
1384 break;
1385 case GSI_EP_OP_ENABLE_GSI:
1386 dev_dbg(mdwc->dev, "EP_OP_ENABLE_GSI\n");
1387 gsi_enable(ep);
1388 break;
1389 case GSI_EP_OP_GET_CH_INFO:
1390 ch_info = (struct gsi_channel_info *)op_data;
1391 gsi_get_channel_info(ep, ch_info);
1392 break;
Mayank Rana64d136b2016-11-01 21:01:34 -07001393 case GSI_EP_OP_RING_DB:
Mayank Rana511f3b22016-08-02 12:00:11 -07001394 request = (struct usb_gsi_request *)op_data;
Mayank Rana64d136b2016-11-01 21:01:34 -07001395 dbg_print(0xFF, "RING_DB", 0, ep->name);
1396 gsi_ring_db(ep, request);
Mayank Rana511f3b22016-08-02 12:00:11 -07001397 break;
1398 case GSI_EP_OP_UPDATEXFER:
1399 request = (struct usb_gsi_request *)op_data;
1400 dev_dbg(mdwc->dev, "EP_OP_UPDATEXFER\n");
Mayank Rana8432c362016-09-30 18:41:17 -07001401 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001402 ret = gsi_updatexfer_for_ep(ep, request);
Mayank Rana8432c362016-09-30 18:41:17 -07001403 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001404 break;
1405 case GSI_EP_OP_ENDXFER:
1406 request = (struct usb_gsi_request *)op_data;
1407 dev_dbg(mdwc->dev, "EP_OP_ENDXFER for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001408 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001409 gsi_endxfer_for_ep(ep);
Mayank Rana8432c362016-09-30 18:41:17 -07001410 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001411 break;
1412 case GSI_EP_OP_SET_CLR_BLOCK_DBL:
1413 block_db = *((bool *)op_data);
1414 dev_dbg(mdwc->dev, "EP_OP_SET_CLR_BLOCK_DBL %d\n",
1415 block_db);
1416 gsi_set_clear_dbell(ep, block_db);
1417 break;
1418 case GSI_EP_OP_CHECK_FOR_SUSPEND:
1419 dev_dbg(mdwc->dev, "EP_OP_CHECK_FOR_SUSPEND\n");
1420 f_suspend = *((bool *)op_data);
1421 ret = gsi_check_ready_to_suspend(ep, f_suspend);
1422 break;
1423 case GSI_EP_OP_DISABLE:
1424 dev_dbg(mdwc->dev, "EP_OP_DISABLE\n");
1425 ret = ep->ops->disable(ep);
1426 break;
1427 default:
1428 dev_err(mdwc->dev, "%s: Invalid opcode GSI EP\n", __func__);
1429 }
1430
1431 return ret;
1432}
1433
1434/**
1435 * Configure MSM endpoint.
1436 * This function does configurations specific
1437 * to an endpoint which needs a specific implementation
1438 * in the MSM architecture.
1439 *
1440 * This function should be called by a usb function/class
1441 * layer which needs support from the MSM-specific HW
1442 * which wraps the USB3 core (like GSI or DBM specific endpoints).
1443 *
1444 * @ep - a pointer to some usb_ep instance
1445 *
1446 * @return int - 0 on success, negative on error.
1447 */
1448int msm_ep_config(struct usb_ep *ep)
1449{
1450 struct dwc3_ep *dep = to_dwc3_ep(ep);
1451 struct dwc3 *dwc = dep->dwc;
1452 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1453 struct usb_ep_ops *new_ep_ops;
1454
1455
1456 /* Save original ep ops for future restore */
1457 if (mdwc->original_ep_ops[dep->number]) {
1458 dev_err(mdwc->dev,
1459 "ep [%s,%d] already configured as msm endpoint\n",
1460 ep->name, dep->number);
1461 return -EPERM;
1462 }
1463 mdwc->original_ep_ops[dep->number] = ep->ops;
1464
1465 /* Set new usb ops as we like */
1466 new_ep_ops = kzalloc(sizeof(struct usb_ep_ops), GFP_ATOMIC);
1467 if (!new_ep_ops)
1468 return -ENOMEM;
1469
1470 (*new_ep_ops) = (*ep->ops);
1471 new_ep_ops->queue = dwc3_msm_ep_queue;
1472 new_ep_ops->gsi_ep_op = dwc3_msm_gsi_ep_op;
1473 ep->ops = new_ep_ops;
1474
1475 /*
1476 * Do additional MSM-specific usb endpoint configurations
1477 * here.
1478 */
1479
1480 return 0;
1481}
1482EXPORT_SYMBOL(msm_ep_config);
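
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * a function driver that owns a GSI-capable endpoint would typically do
 *
 *	msm_ep_config(ep);
 *	ep->ops->gsi_ep_op(ep, &gsi_req, GSI_EP_OP_PREPARE_TRBS);
 *	ep->ops->gsi_ep_op(ep, &gsi_req, GSI_EP_OP_CONFIG);
 *	...
 *	ep->ops->gsi_ep_op(ep, NULL, GSI_EP_OP_FREE_TRBS);
 *	msm_ep_unconfig(ep);
 *
 * since msm_ep_config() installs dwc3_msm_ep_queue() and dwc3_msm_gsi_ep_op()
 * as the endpoint ops.
 */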
1483
1484/**
1485 * Un-configure MSM endpoint.
1486 * Tear down configurations done in the
1487 * dwc3_msm_ep_config function.
1488 *
1489 * @ep - a pointer to some usb_ep instance
1490 *
1491 * @return int - 0 on success, negative on error.
1492 */
1493int msm_ep_unconfig(struct usb_ep *ep)
1494{
1495 struct dwc3_ep *dep = to_dwc3_ep(ep);
1496 struct dwc3 *dwc = dep->dwc;
1497 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1498 struct usb_ep_ops *old_ep_ops;
1499
1500 /* Restore original ep ops */
1501 if (!mdwc->original_ep_ops[dep->number]) {
1502 dev_err(mdwc->dev,
1503 "ep [%s,%d] was not configured as msm endpoint\n",
1504 ep->name, dep->number);
1505 return -EINVAL;
1506 }
1507 old_ep_ops = (struct usb_ep_ops *)ep->ops;
1508 ep->ops = mdwc->original_ep_ops[dep->number];
1509 mdwc->original_ep_ops[dep->number] = NULL;
1510 kfree(old_ep_ops);
1511
1512 /*
1513 * Do additional MSM-specific usb endpoint un-configurations
1514 * here.
1515 */
1516
1517 return 0;
1518}
1519EXPORT_SYMBOL(msm_ep_unconfig);
1520#endif /* (CONFIG_USB_DWC3_GADGET) || (CONFIG_USB_DWC3_DUAL_ROLE) */
1521
1522static void dwc3_resume_work(struct work_struct *w);
1523
1524static void dwc3_restart_usb_work(struct work_struct *w)
1525{
1526 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
1527 restart_usb_work);
1528 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1529 unsigned int timeout = 50;
1530
1531 dev_dbg(mdwc->dev, "%s\n", __func__);
1532
1533 if (atomic_read(&dwc->in_lpm) || !dwc->is_drd) {
1534 dev_dbg(mdwc->dev, "%s failed!!!\n", __func__);
1535 return;
1536 }
1537
1538 /* guard against concurrent VBUS handling */
1539 mdwc->in_restart = true;
1540
1541 if (!mdwc->vbus_active) {
1542 dev_dbg(mdwc->dev, "%s bailing out in disconnect\n", __func__);
1543 dwc->err_evt_seen = false;
1544 mdwc->in_restart = false;
1545 return;
1546 }
1547
Mayank Rana08e41922017-03-02 15:25:48 -08001548 dbg_event(0xFF, "RestartUSB", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07001549 /* Reset active USB connection */
1550 dwc3_resume_work(&mdwc->resume_work);
1551
1552 /* Make sure disconnect is processed before sending connect */
1553 while (--timeout && !pm_runtime_suspended(mdwc->dev))
1554 msleep(20);
1555
1556 if (!timeout) {
1557 dev_dbg(mdwc->dev,
1558 "Not in LPM after disconnect, forcing suspend...\n");
Mayank Rana08e41922017-03-02 15:25:48 -08001559 dbg_event(0xFF, "ReStart:RT SUSP",
1560 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07001561 pm_runtime_suspend(mdwc->dev);
1562 }
1563
Vijayavardhan Vennapusa5e5680e2016-11-25 11:25:35 +05301564 mdwc->in_restart = false;
Mayank Rana511f3b22016-08-02 12:00:11 -07001565 /* Force reconnect only if cable is still connected */
Vijayavardhan Vennapusa5e5680e2016-11-25 11:25:35 +05301566 if (mdwc->vbus_active)
Mayank Rana511f3b22016-08-02 12:00:11 -07001567 dwc3_resume_work(&mdwc->resume_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07001568
1569 dwc->err_evt_seen = false;
1570 flush_delayed_work(&mdwc->sm_work);
1571}
1572
Manu Gautam976fdfc2016-08-18 09:27:35 +05301573static int msm_dwc3_usbdev_notify(struct notifier_block *self,
1574 unsigned long action, void *priv)
1575{
1576 struct dwc3_msm *mdwc = container_of(self, struct dwc3_msm, usbdev_nb);
1577 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1578 struct usb_bus *bus = priv;
1579
1580 /* Interested only in recovery when HC dies */
1581 if (action != USB_BUS_DIED)
1582 return 0;
1583
1584 dev_dbg(mdwc->dev, "%s initiate recovery from hc_died\n", __func__);
1585 /* Recovery already in progress */
1586 if (mdwc->hc_died)
1587 return 0;
1588
1589 if (bus->controller != &dwc->xhci->dev) {
1590 dev_dbg(mdwc->dev, "%s event for diff HCD\n", __func__);
1591 return 0;
1592 }
1593
1594 mdwc->hc_died = true;
1595 schedule_delayed_work(&mdwc->sm_work, 0);
1596 return 0;
1597}
1598
1599
Mayank Rana511f3b22016-08-02 12:00:11 -07001600/*
1601 * Check whether the DWC3 requires resetting the ep
1602 * after going to Low Power Mode (lpm)
1603 */
1604bool msm_dwc3_reset_ep_after_lpm(struct usb_gadget *gadget)
1605{
1606 struct dwc3 *dwc = container_of(gadget, struct dwc3, gadget);
1607 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1608
1609 return dbm_reset_ep_after_lpm(mdwc->dbm);
1610}
1611EXPORT_SYMBOL(msm_dwc3_reset_ep_after_lpm);
1612
1613/*
1614 * Config Global Distributed Switch Controller (GDSC)
1615 * to support controller power collapse
1616 */
1617static int dwc3_msm_config_gdsc(struct dwc3_msm *mdwc, int on)
1618{
1619 int ret;
1620
1621 if (IS_ERR_OR_NULL(mdwc->dwc3_gdsc))
1622 return -EPERM;
1623
1624 if (on) {
1625 ret = regulator_enable(mdwc->dwc3_gdsc);
1626 if (ret) {
1627 dev_err(mdwc->dev, "unable to enable usb3 gdsc\n");
1628 return ret;
1629 }
1630 } else {
1631 ret = regulator_disable(mdwc->dwc3_gdsc);
1632 if (ret) {
1633 dev_err(mdwc->dev, "unable to disable usb3 gdsc\n");
1634 return ret;
1635 }
1636 }
1637
1638 return ret;
1639}
1640
1641static int dwc3_msm_link_clk_reset(struct dwc3_msm *mdwc, bool assert)
1642{
1643 int ret = 0;
1644
1645 if (assert) {
Mayank Ranad339abe2017-05-31 09:19:49 -07001646 disable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
Mayank Rana511f3b22016-08-02 12:00:11 -07001647 /* Using asynchronous block reset to the hardware */
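		/*
		 * Clocks are gated before asserting the block reset and are
		 * turned back on only after the reset is de-asserted in the
		 * else path below.
		 */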
1648 dev_dbg(mdwc->dev, "block_reset ASSERT\n");
1649 clk_disable_unprepare(mdwc->utmi_clk);
1650 clk_disable_unprepare(mdwc->sleep_clk);
1651 clk_disable_unprepare(mdwc->core_clk);
1652 clk_disable_unprepare(mdwc->iface_clk);
Amit Nischal4d278212016-06-06 17:54:34 +05301653 ret = reset_control_assert(mdwc->core_reset);
Mayank Rana511f3b22016-08-02 12:00:11 -07001654 if (ret)
Amit Nischal4d278212016-06-06 17:54:34 +05301655 dev_err(mdwc->dev, "dwc3 core_reset assert failed\n");
Mayank Rana511f3b22016-08-02 12:00:11 -07001656 } else {
1657 dev_dbg(mdwc->dev, "block_reset DEASSERT\n");
Amit Nischal4d278212016-06-06 17:54:34 +05301658 ret = reset_control_deassert(mdwc->core_reset);
1659 if (ret)
1660 dev_err(mdwc->dev, "dwc3 core_reset deassert failed\n");
Mayank Rana511f3b22016-08-02 12:00:11 -07001661 ndelay(200);
1662 clk_prepare_enable(mdwc->iface_clk);
1663 clk_prepare_enable(mdwc->core_clk);
1664 clk_prepare_enable(mdwc->sleep_clk);
1665 clk_prepare_enable(mdwc->utmi_clk);
Mayank Ranad339abe2017-05-31 09:19:49 -07001666 enable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
Mayank Rana511f3b22016-08-02 12:00:11 -07001667 }
1668
1669 return ret;
1670}
1671
1672static void dwc3_msm_update_ref_clk(struct dwc3_msm *mdwc)
1673{
1674 u32 guctl, gfladj = 0;
1675
1676 guctl = dwc3_msm_read_reg(mdwc->base, DWC3_GUCTL);
1677 guctl &= ~DWC3_GUCTL_REFCLKPER;
1678
1679 /* GFLADJ register is used starting with revision 2.50a */
1680 if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) >= DWC3_REVISION_250A) {
1681 gfladj = dwc3_msm_read_reg(mdwc->base, DWC3_GFLADJ);
1682 gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
1683 gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZ_DECR;
1684 gfladj &= ~DWC3_GFLADJ_REFCLK_LPM_SEL;
1685 gfladj &= ~DWC3_GFLADJ_REFCLK_FLADJ;
1686 }
1687
1688 /* Refer to SNPS Databook Table 6-55 for calculations used */
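	/*
	 * GUCTL.REFCLKPER holds the reference clock period in ns
	 * (1/19.2 MHz ~= 52 ns, 1/24 MHz ~= 41 ns). GFLADJ's 240MHZ_DECR
	 * field is 240 MHz divided by the ref clock rate (12.5 and 10),
	 * with 240MHZDECR_PLS1 indicating the extra 0.5 for 19.2 MHz.
	 */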
1689 switch (mdwc->utmi_clk_rate) {
1690 case 19200000:
1691 guctl |= 52 << __ffs(DWC3_GUCTL_REFCLKPER);
1692 gfladj |= 12 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
1693 gfladj |= DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
1694 gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
1695 gfladj |= 200 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
1696 break;
1697 case 24000000:
1698 guctl |= 41 << __ffs(DWC3_GUCTL_REFCLKPER);
1699 gfladj |= 10 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
1700 gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
1701 gfladj |= 2032 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
1702 break;
1703 default:
1704 dev_warn(mdwc->dev, "Unsupported utmi_clk_rate: %u\n",
1705 mdwc->utmi_clk_rate);
1706 break;
1707 }
1708
1709 dwc3_msm_write_reg(mdwc->base, DWC3_GUCTL, guctl);
1710 if (gfladj)
1711 dwc3_msm_write_reg(mdwc->base, DWC3_GFLADJ, gfladj);
1712}
1713
1714/* Initialize QSCRATCH registers for HSPHY and SSPHY operation */
1715static void dwc3_msm_qscratch_reg_init(struct dwc3_msm *mdwc)
1716{
1717 if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) < DWC3_REVISION_250A)
1718 /* On older cores set XHCI_REV bit to specify revision 1.0 */
1719 dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
1720 BIT(2), 1);
1721
1722 /*
1723 * Enable master clock for RAMs to allow BAM to access RAMs when
1724 * RAM clock gating is enabled via DWC3's GCTL. Otherwise issues
1725 * are seen where RAM clocks get turned OFF in SS mode
1726 */
1727 dwc3_msm_write_reg(mdwc->base, CGCTL_REG,
1728 dwc3_msm_read_reg(mdwc->base, CGCTL_REG) | 0x18);
1729
1730}
1731
Jack Pham4b8b4ae2016-08-09 11:36:34 -07001732static void dwc3_msm_vbus_draw_work(struct work_struct *w)
1733{
1734 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
1735 vbus_draw_work);
1736 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1737
1738 dwc3_msm_gadget_vbus_draw(mdwc, dwc->vbus_draw);
1739}
1740
Mayank Rana511f3b22016-08-02 12:00:11 -07001741static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event)
1742{
1743 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
Mayank Ranaf4918d32016-12-15 13:35:55 -08001744 struct dwc3_event_buffer *evt;
Mayank Rana511f3b22016-08-02 12:00:11 -07001745 u32 reg;
Mayank Ranaf4918d32016-12-15 13:35:55 -08001746 int i;
Mayank Rana511f3b22016-08-02 12:00:11 -07001747
1748 switch (event) {
1749 case DWC3_CONTROLLER_ERROR_EVENT:
1750 dev_info(mdwc->dev,
1751 "DWC3_CONTROLLER_ERROR_EVENT received, irq cnt %lu\n",
1752 dwc->irq_cnt);
1753
1754 dwc3_gadget_disable_irq(dwc);
1755
1756 /* prevent core from generating interrupts until recovery */
1757 reg = dwc3_msm_read_reg(mdwc->base, DWC3_GCTL);
1758 reg |= DWC3_GCTL_CORESOFTRESET;
1759 dwc3_msm_write_reg(mdwc->base, DWC3_GCTL, reg);
1760
1761 /* restart USB which performs full reset and reconnect */
1762 schedule_work(&mdwc->restart_usb_work);
1763 break;
1764 case DWC3_CONTROLLER_RESET_EVENT:
1765 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESET_EVENT received\n");
1766 /* HS & SSPHYs get reset as part of core soft reset */
1767 dwc3_msm_qscratch_reg_init(mdwc);
1768 break;
1769 case DWC3_CONTROLLER_POST_RESET_EVENT:
1770 dev_dbg(mdwc->dev,
1771 "DWC3_CONTROLLER_POST_RESET_EVENT received\n");
1772
1773 /*
1774	 * The sequence below is used when the controller operates without
Vijayavardhan Vennapusa64d7a522016-10-21 15:02:09 +05301775	 * an SS PHY and only USB high/full speed is supported.
Mayank Rana511f3b22016-08-02 12:00:11 -07001776 */
Vijayavardhan Vennapusa64d7a522016-10-21 15:02:09 +05301777 if (dwc->maximum_speed == USB_SPEED_HIGH ||
1778 dwc->maximum_speed == USB_SPEED_FULL) {
Mayank Rana511f3b22016-08-02 12:00:11 -07001779 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1780 dwc3_msm_read_reg(mdwc->base,
1781 QSCRATCH_GENERAL_CFG)
1782 | PIPE_UTMI_CLK_DIS);
1783
1784 usleep_range(2, 5);
1785
1786
1787 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1788 dwc3_msm_read_reg(mdwc->base,
1789 QSCRATCH_GENERAL_CFG)
1790 | PIPE_UTMI_CLK_SEL
1791 | PIPE3_PHYSTATUS_SW);
1792
1793 usleep_range(2, 5);
1794
1795 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1796 dwc3_msm_read_reg(mdwc->base,
1797 QSCRATCH_GENERAL_CFG)
1798 & ~PIPE_UTMI_CLK_DIS);
1799 }
1800
1801 dwc3_msm_update_ref_clk(mdwc);
1802 dwc->tx_fifo_size = mdwc->tx_fifo_size;
1803 break;
1804 case DWC3_CONTROLLER_CONNDONE_EVENT:
1805 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_CONNDONE_EVENT received\n");
1806 /*
1807 * Add power event if the dbm indicates coming out of L1 by
1808 * interrupt
1809 */
1810 if (mdwc->dbm && dbm_l1_lpm_interrupt(mdwc->dbm))
1811 dwc3_msm_write_reg_field(mdwc->base,
1812 PWR_EVNT_IRQ_MASK_REG,
1813 PWR_EVNT_LPM_OUT_L1_MASK, 1);
1814
1815 atomic_set(&dwc->in_lpm, 0);
1816 break;
1817 case DWC3_CONTROLLER_NOTIFY_OTG_EVENT:
1818 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_NOTIFY_OTG_EVENT received\n");
1819 if (dwc->enable_bus_suspend) {
1820 mdwc->suspend = dwc->b_suspend;
1821 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
1822 }
1823 break;
1824 case DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT:
1825 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT received\n");
Jack Pham4b8b4ae2016-08-09 11:36:34 -07001826 schedule_work(&mdwc->vbus_draw_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07001827 break;
1828 case DWC3_CONTROLLER_RESTART_USB_SESSION:
1829 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESTART_USB_SESSION received\n");
Hemant Kumar43874172016-08-25 16:17:48 -07001830 schedule_work(&mdwc->restart_usb_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07001831 break;
Mayank Ranaf4918d32016-12-15 13:35:55 -08001832 case DWC3_GSI_EVT_BUF_ALLOC:
1833 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_ALLOC\n");
1834
1835 if (!mdwc->num_gsi_event_buffers)
1836 break;
1837
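		/*
		 * Allocate one dwc3_event_buffer per requested GSI event
		 * buffer; each buffer is DMA-coherent so the controller can
		 * write events into it directly.
		 */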
1838 mdwc->gsi_ev_buff = devm_kzalloc(dwc->dev,
1839 sizeof(*dwc->ev_buf) * mdwc->num_gsi_event_buffers,
1840 GFP_KERNEL);
1841 if (!mdwc->gsi_ev_buff) {
1842 dev_err(dwc->dev, "can't allocate gsi_ev_buff\n");
1843 break;
1844 }
1845
1846 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1847
1848 evt = devm_kzalloc(dwc->dev, sizeof(*evt), GFP_KERNEL);
1849 if (!evt)
1850 break;
1851 evt->dwc = dwc;
1852 evt->length = DWC3_EVENT_BUFFERS_SIZE;
Mayank Rana0e4c4432017-09-18 16:46:00 -07001853 evt->buf = dma_alloc_coherent(dwc->sysdev,
Mayank Ranaf4918d32016-12-15 13:35:55 -08001854 DWC3_EVENT_BUFFERS_SIZE,
1855 &evt->dma, GFP_KERNEL);
1856 if (!evt->buf) {
1857 dev_err(dwc->dev,
1858 "can't allocate gsi_evt_buf(%d)\n", i);
1859 break;
1860 }
1861 mdwc->gsi_ev_buff[i] = evt;
1862 }
1863 break;
1864 case DWC3_GSI_EVT_BUF_SETUP:
1865 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_SETUP\n");
1866 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1867 evt = mdwc->gsi_ev_buff[i];
Mayank Rana0eb0db72017-10-03 13:46:32 -07001868 if (!evt)
1869 break;
1870
Mayank Ranaf4918d32016-12-15 13:35:55 -08001871 dev_dbg(mdwc->dev, "Evt buf %p dma %08llx length %d\n",
1872 evt->buf, (unsigned long long) evt->dma,
1873 evt->length);
1874 memset(evt->buf, 0, evt->length);
1875 evt->lpos = 0;
1876 /*
1877 * Primary event buffer is programmed with registers
1878 * DWC3_GEVNT*(0). Hence use DWC3_GEVNT*(i+1) to
1879 * program USB GSI related event buffer with DWC3
1880 * controller.
1881 */
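			/*
			 * GEVNTSIZ is programmed with the interrupt-mask bit
			 * set so these buffers do not raise core event
			 * interrupts; they are intended to be consumed via
			 * the GSI interface instead.
			 */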
1882 dwc3_writel(dwc->regs, DWC3_GEVNTADRLO((i+1)),
1883 lower_32_bits(evt->dma));
1884 dwc3_writel(dwc->regs, DWC3_GEVNTADRHI((i+1)),
1885 DWC3_GEVNTADRHI_EVNTADRHI_GSI_EN(
1886 DWC3_GEVENT_TYPE_GSI) |
1887 DWC3_GEVNTADRHI_EVNTADRHI_GSI_IDX((i+1)));
1888 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ((i+1)),
1889 DWC3_GEVNTCOUNT_EVNTINTRPTMASK |
1890 ((evt->length) & 0xffff));
1891 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT((i+1)), 0);
1892 }
1893 break;
1894 case DWC3_GSI_EVT_BUF_CLEANUP:
1895 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_CLEANUP\n");
Mayank Rana0eb0db72017-10-03 13:46:32 -07001896 if (!mdwc->gsi_ev_buff)
1897 break;
1898
Mayank Ranaf4918d32016-12-15 13:35:55 -08001899 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1900 evt = mdwc->gsi_ev_buff[i];
1901 evt->lpos = 0;
1902 /*
1903 * Primary event buffer is programmed with registers
1904 * DWC3_GEVNT*(0). Hence use DWC3_GEVNT*(i+1) to
1905 * program USB GSI related event buffer with DWC3
1906 * controller.
1907 */
1908 dwc3_writel(dwc->regs, DWC3_GEVNTADRLO((i+1)), 0);
1909 dwc3_writel(dwc->regs, DWC3_GEVNTADRHI((i+1)), 0);
1910 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ((i+1)),
1911 DWC3_GEVNTSIZ_INTMASK |
1912 DWC3_GEVNTSIZ_SIZE((i+1)));
1913 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT((i+1)), 0);
1914 }
1915 break;
1916 case DWC3_GSI_EVT_BUF_FREE:
1917 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_FREE\n");
Mayank Rana0eb0db72017-10-03 13:46:32 -07001918 if (!mdwc->gsi_ev_buff)
1919 break;
1920
Mayank Ranaf4918d32016-12-15 13:35:55 -08001921 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1922 evt = mdwc->gsi_ev_buff[i];
1923 if (evt)
Mayank Rana0e4c4432017-09-18 16:46:00 -07001924 dma_free_coherent(dwc->sysdev, evt->length,
Mayank Ranaf4918d32016-12-15 13:35:55 -08001925 evt->buf, evt->dma);
1926 }
1927 break;
Mayank Rana511f3b22016-08-02 12:00:11 -07001928 default:
1929 dev_dbg(mdwc->dev, "unknown dwc3 event\n");
1930 break;
1931 }
1932}
1933
1934static void dwc3_msm_block_reset(struct dwc3_msm *mdwc, bool core_reset)
1935{
1936 int ret = 0;
1937
1938 if (core_reset) {
1939 ret = dwc3_msm_link_clk_reset(mdwc, 1);
1940 if (ret)
1941 return;
1942
1943 usleep_range(1000, 1200);
1944 ret = dwc3_msm_link_clk_reset(mdwc, 0);
1945 if (ret)
1946 return;
1947
1948 usleep_range(10000, 12000);
1949 }
1950
1951 if (mdwc->dbm) {
1952 /* Reset the DBM */
1953 dbm_soft_reset(mdwc->dbm, 1);
1954 usleep_range(1000, 1200);
1955 dbm_soft_reset(mdwc->dbm, 0);
1956
1957		/* Enable DBM */
1958 dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
1959 DBM_EN_MASK, 0x1);
1960 dbm_enable(mdwc->dbm);
1961 }
1962}
1963
1964static void dwc3_msm_power_collapse_por(struct dwc3_msm *mdwc)
1965{
1966 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1967 u32 val;
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05301968 int ret;
Mayank Rana511f3b22016-08-02 12:00:11 -07001969
1970 /* Configure AHB2PHY for one wait state read/write */
1971 if (mdwc->ahb2phy_base) {
1972 clk_prepare_enable(mdwc->cfg_ahb_clk);
1973 val = readl_relaxed(mdwc->ahb2phy_base +
1974 PERIPH_SS_AHB2PHY_TOP_CFG);
1975 if (val != ONE_READ_WRITE_WAIT) {
1976 writel_relaxed(ONE_READ_WRITE_WAIT,
1977 mdwc->ahb2phy_base + PERIPH_SS_AHB2PHY_TOP_CFG);
1978 /* complete above write before configuring USB PHY. */
1979 mb();
1980 }
1981 clk_disable_unprepare(mdwc->cfg_ahb_clk);
1982 }
1983
1984 if (!mdwc->init) {
Mayank Rana08e41922017-03-02 15:25:48 -08001985 dbg_event(0xFF, "dwc3 init",
1986 atomic_read(&mdwc->dev->power.usage_count));
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05301987 ret = dwc3_core_pre_init(dwc);
1988 if (ret) {
1989 dev_err(mdwc->dev, "dwc3_core_pre_init failed\n");
1990 return;
1991 }
Mayank Rana511f3b22016-08-02 12:00:11 -07001992 mdwc->init = true;
1993 }
1994
1995 dwc3_core_init(dwc);
1996 /* Re-configure event buffers */
1997 dwc3_event_buffers_setup(dwc);
1998}
1999
2000static int dwc3_msm_prepare_suspend(struct dwc3_msm *mdwc)
2001{
2002 unsigned long timeout;
2003 u32 reg = 0;
2004
2005 if ((mdwc->in_host_mode || mdwc->vbus_active)
Vijayavardhan Vennapusa8cf91a62016-09-01 12:05:50 +05302006 && dwc3_msm_is_superspeed(mdwc) && !mdwc->in_restart) {
Mayank Rana511f3b22016-08-02 12:00:11 -07002007 if (!atomic_read(&mdwc->in_p3)) {
2008			dev_err(mdwc->dev, "Not in P3, aborting LPM sequence\n");
2009 return -EBUSY;
2010 }
2011 }
2012
2013 /* Clear previous L2 events */
2014 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
2015 PWR_EVNT_LPM_IN_L2_MASK | PWR_EVNT_LPM_OUT_L2_MASK);
2016
2017 /* Prepare HSPHY for suspend */
2018 reg = dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0));
2019 dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
2020 reg | DWC3_GUSB2PHYCFG_ENBLSLPM | DWC3_GUSB2PHYCFG_SUSPHY);
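	/*
	 * ENBLSLPM and SUSPHY allow the HS PHY to enter its low power state
	 * once the link settles in L2, which the loop below waits for.
	 */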
2021
2022 /* Wait for PHY to go into L2 */
2023 timeout = jiffies + msecs_to_jiffies(5);
2024 while (!time_after(jiffies, timeout)) {
2025 reg = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
2026 if (reg & PWR_EVNT_LPM_IN_L2_MASK)
2027 break;
2028 }
2029 if (!(reg & PWR_EVNT_LPM_IN_L2_MASK))
2030 dev_err(mdwc->dev, "could not transition HS PHY to L2\n");
2031
2032 /* Clear L2 event bit */
2033 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
2034 PWR_EVNT_LPM_IN_L2_MASK);
2035
2036 return 0;
2037}
2038
Mayank Rana511f3b22016-08-02 12:00:11 -07002039static void dwc3_set_phy_speed_flags(struct dwc3_msm *mdwc)
2040{
2041 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2042 int i, num_ports;
2043 u32 reg;
2044
2045 mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
2046 if (mdwc->in_host_mode) {
2047 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
2048 num_ports = HCS_MAX_PORTS(reg);
2049 for (i = 0; i < num_ports; i++) {
2050 reg = dwc3_msm_read_reg(mdwc->base,
2051 USB3_PORTSC + i*0x10);
2052 if (reg & PORT_PE) {
2053 if (DEV_HIGHSPEED(reg) || DEV_FULLSPEED(reg))
2054 mdwc->hs_phy->flags |= PHY_HSFS_MODE;
2055 else if (DEV_LOWSPEED(reg))
2056 mdwc->hs_phy->flags |= PHY_LS_MODE;
2057 }
2058 }
2059 } else {
2060 if (dwc->gadget.speed == USB_SPEED_HIGH ||
2061 dwc->gadget.speed == USB_SPEED_FULL)
2062 mdwc->hs_phy->flags |= PHY_HSFS_MODE;
2063 else if (dwc->gadget.speed == USB_SPEED_LOW)
2064 mdwc->hs_phy->flags |= PHY_LS_MODE;
2065 }
2066}
2067
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05302068static void msm_dwc3_perf_vote_update(struct dwc3_msm *mdwc,
2069 bool perf_mode);
Mayank Rana511f3b22016-08-02 12:00:11 -07002070
Mayank Ranad339abe2017-05-31 09:19:49 -07002071static void configure_usb_wakeup_interrupt(struct dwc3_msm *mdwc,
2072 struct usb_irq *uirq, unsigned int polarity, bool enable)
2073{
2074 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2075
2076 if (uirq && enable && !uirq->enable) {
2077 dbg_event(0xFF, "PDC_IRQ_EN", uirq->irq);
2078 dbg_event(0xFF, "PDC_IRQ_POL", polarity);
2079 /* clear any pending interrupt */
2080 irq_set_irqchip_state(uirq->irq, IRQCHIP_STATE_PENDING, 0);
2081 irq_set_irq_type(uirq->irq, polarity);
2082 enable_irq_wake(uirq->irq);
2083 enable_irq(uirq->irq);
2084 uirq->enable = true;
2085 }
2086
2087 if (uirq && !enable && uirq->enable) {
2088 dbg_event(0xFF, "PDC_IRQ_DIS", uirq->irq);
2089 disable_irq_wake(uirq->irq);
2090 disable_irq_nosync(uirq->irq);
2091 uirq->enable = false;
2092 }
2093}
2094
2095static void enable_usb_pdc_interrupt(struct dwc3_msm *mdwc, bool enable)
2096{
2097 if (!enable)
2098 goto disable_usb_irq;
2099
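	/*
	 * Pick the wakeup trigger based on the speed flags recorded by
	 * dwc3_set_phy_speed_flags(): LS links wake on a DM falling edge,
	 * HS/FS links wake on a DP falling edge, and with no link both DP
	 * and DM rising edges are armed to catch line-state changes such
	 * as a new attach.
	 */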
2100 if (mdwc->hs_phy->flags & PHY_LS_MODE) {
2101 configure_usb_wakeup_interrupt(mdwc,
2102 &mdwc->wakeup_irq[DM_HS_PHY_IRQ],
2103 IRQ_TYPE_EDGE_FALLING, enable);
2104 } else if (mdwc->hs_phy->flags & PHY_HSFS_MODE) {
2105 configure_usb_wakeup_interrupt(mdwc,
2106 &mdwc->wakeup_irq[DP_HS_PHY_IRQ],
2107 IRQ_TYPE_EDGE_FALLING, enable);
2108 } else {
2109 configure_usb_wakeup_interrupt(mdwc,
2110 &mdwc->wakeup_irq[DP_HS_PHY_IRQ],
2111 IRQ_TYPE_EDGE_RISING, true);
2112 configure_usb_wakeup_interrupt(mdwc,
2113 &mdwc->wakeup_irq[DM_HS_PHY_IRQ],
2114 IRQ_TYPE_EDGE_RISING, true);
2115 }
2116
2117 configure_usb_wakeup_interrupt(mdwc,
2118 &mdwc->wakeup_irq[SS_PHY_IRQ],
2119 IRQF_TRIGGER_HIGH | IRQ_TYPE_LEVEL_HIGH, enable);
2120 return;
2121
2122disable_usb_irq:
2123 configure_usb_wakeup_interrupt(mdwc,
2124 &mdwc->wakeup_irq[DP_HS_PHY_IRQ], 0, enable);
2125 configure_usb_wakeup_interrupt(mdwc,
2126 &mdwc->wakeup_irq[DM_HS_PHY_IRQ], 0, enable);
2127 configure_usb_wakeup_interrupt(mdwc,
2128 &mdwc->wakeup_irq[SS_PHY_IRQ], 0, enable);
2129}
2130
2131static void configure_nonpdc_usb_interrupt(struct dwc3_msm *mdwc,
2132 struct usb_irq *uirq, bool enable)
2133{
2134 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2135
2136 if (uirq && enable && !uirq->enable) {
2137 dbg_event(0xFF, "IRQ_EN", uirq->irq);
2138 enable_irq_wake(uirq->irq);
2139 enable_irq(uirq->irq);
2140 uirq->enable = true;
2141 }
2142
2143 if (uirq && !enable && uirq->enable) {
2144 dbg_event(0xFF, "IRQ_DIS", uirq->irq);
2145 disable_irq_wake(uirq->irq);
2146 disable_irq_nosync(uirq->irq);
Chandana Kishori Chiluveru268c2222017-12-14 12:51:04 +05302147 uirq->enable = false;
Mayank Ranad339abe2017-05-31 09:19:49 -07002148 }
2149}
2150
Mayank Rana511f3b22016-08-02 12:00:11 -07002151static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
2152{
Mayank Rana83ad5822016-08-09 14:17:22 -07002153 int ret;
Mayank Rana511f3b22016-08-02 12:00:11 -07002154 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana83ad5822016-08-09 14:17:22 -07002155 struct dwc3_event_buffer *evt;
Mayank Ranad339abe2017-05-31 09:19:49 -07002156 struct usb_irq *uirq;
Mayank Rana511f3b22016-08-02 12:00:11 -07002157
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +05302158 mutex_lock(&mdwc->suspend_resume_mutex);
Mayank Rana511f3b22016-08-02 12:00:11 -07002159 if (atomic_read(&dwc->in_lpm)) {
2160 dev_dbg(mdwc->dev, "%s: Already suspended\n", __func__);
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +05302161 mutex_unlock(&mdwc->suspend_resume_mutex);
Mayank Rana511f3b22016-08-02 12:00:11 -07002162 return 0;
2163 }
2164
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05302165 cancel_delayed_work_sync(&mdwc->perf_vote_work);
2166 msm_dwc3_perf_vote_update(mdwc, false);
2167
Mayank Rana511f3b22016-08-02 12:00:11 -07002168 if (!mdwc->in_host_mode) {
Mayank Rana83ad5822016-08-09 14:17:22 -07002169 evt = dwc->ev_buf;
2170 if ((evt->flags & DWC3_EVENT_PENDING)) {
2171 dev_dbg(mdwc->dev,
Mayank Rana511f3b22016-08-02 12:00:11 -07002172 "%s: %d device events pending, abort suspend\n",
2173 __func__, evt->count / 4);
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +05302174 mutex_unlock(&mdwc->suspend_resume_mutex);
Mayank Rana83ad5822016-08-09 14:17:22 -07002175 return -EBUSY;
Mayank Rana511f3b22016-08-02 12:00:11 -07002176 }
2177 }
2178
2179 if (!mdwc->vbus_active && dwc->is_drd &&
2180 mdwc->otg_state == OTG_STATE_B_PERIPHERAL) {
2181 /*
2182		 * In some cases pm_runtime_suspend may be called by usb_bam while
2183		 * an LPM request is still pending. However, if this happens after
2184		 * the cable was disconnected but before the OTG state has changed
2185		 * to IDLE, the OTG state machine is still running and we would
2186		 * race against it. So cancel LPM for now; the OTG state machine
2187		 * will enter LPM later, after completing the transition to the
2188		 * IDLE state.
2189 */
2190 dev_dbg(mdwc->dev,
2191 "%s: cable disconnected while not in idle otg state\n",
2192 __func__);
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +05302193 mutex_unlock(&mdwc->suspend_resume_mutex);
Mayank Rana511f3b22016-08-02 12:00:11 -07002194 return -EBUSY;
2195 }
2196
2197 /*
2198	 * For the device bus suspend case: if the gadget has not reached
2199	 * the CONFIGURED state, do not enter LPM and abort the suspend
2200	 * sequence here.
2201 */
2202 if ((dwc->is_drd && mdwc->otg_state == OTG_STATE_B_SUSPEND) &&
2203 (dwc->gadget.state != USB_STATE_CONFIGURED)) {
2204 pr_err("%s(): Trying to go in LPM with state:%d\n",
2205 __func__, dwc->gadget.state);
2206 pr_err("%s(): LPM is not performed.\n", __func__);
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +05302207 mutex_unlock(&mdwc->suspend_resume_mutex);
Mayank Rana511f3b22016-08-02 12:00:11 -07002208 return -EBUSY;
2209 }
2210
2211 ret = dwc3_msm_prepare_suspend(mdwc);
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +05302212 if (ret) {
2213 mutex_unlock(&mdwc->suspend_resume_mutex);
Mayank Rana511f3b22016-08-02 12:00:11 -07002214 return ret;
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +05302215 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002216
Mayank Rana511f3b22016-08-02 12:00:11 -07002217 /* Disable core irq */
2218 if (dwc->irq)
2219 disable_irq(dwc->irq);
2220
Mayank Ranaf616a7f2017-03-20 16:10:39 -07002221 if (work_busy(&dwc->bh_work))
2222 dbg_event(0xFF, "pend evt", 0);
2223
Mayank Rana511f3b22016-08-02 12:00:11 -07002224 /* disable power event irq, hs and ss phy irq is used as wake up src */
Mayank Ranad339abe2017-05-31 09:19:49 -07002225 disable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
Mayank Rana511f3b22016-08-02 12:00:11 -07002226
2227 dwc3_set_phy_speed_flags(mdwc);
2228 /* Suspend HS PHY */
2229 usb_phy_set_suspend(mdwc->hs_phy, 1);
2230
2231 /* Suspend SS PHY */
Mayank Rana17f67e32017-08-15 10:41:28 -07002232 if (dwc->maximum_speed == USB_SPEED_SUPER) {
Mayank Rana511f3b22016-08-02 12:00:11 -07002233 /* indicate phy about SS mode */
2234 if (dwc3_msm_is_superspeed(mdwc))
2235 mdwc->ss_phy->flags |= DEVICE_IN_SS_MODE;
2236 usb_phy_set_suspend(mdwc->ss_phy, 1);
2237 mdwc->lpm_flags |= MDWC3_SS_PHY_SUSPEND;
2238 }
2239
2240 /* make sure above writes are completed before turning off clocks */
2241 wmb();
2242
2243 /* Disable clocks */
2244 if (mdwc->bus_aggr_clk)
2245 clk_disable_unprepare(mdwc->bus_aggr_clk);
2246 clk_disable_unprepare(mdwc->utmi_clk);
2247
Hemant Kumar633dc332016-08-10 13:41:05 -07002248 /* Memory core: OFF, Memory periphery: OFF */
2249 if (!mdwc->in_host_mode && !mdwc->vbus_active) {
2250 clk_set_flags(mdwc->core_clk, CLKFLAG_NORETAIN_MEM);
2251 clk_set_flags(mdwc->core_clk, CLKFLAG_NORETAIN_PERIPH);
2252 }
2253
Mayank Rana511f3b22016-08-02 12:00:11 -07002254 clk_set_rate(mdwc->core_clk, 19200000);
2255 clk_disable_unprepare(mdwc->core_clk);
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05302256 if (mdwc->noc_aggr_clk)
2257 clk_disable_unprepare(mdwc->noc_aggr_clk);
Mayank Rana511f3b22016-08-02 12:00:11 -07002258 /*
2259 * Disable iface_clk only after core_clk as core_clk has FSM
2260	 * dependency on iface_clk. Hence iface_clk should be turned off
2261 * after core_clk is turned off.
2262 */
2263 clk_disable_unprepare(mdwc->iface_clk);
2264 /* USB PHY no more requires TCXO */
2265 clk_disable_unprepare(mdwc->xo_clk);
2266
2267 /* Perform controller power collapse */
Azhar Shaikh69f4c052016-02-11 11:00:58 -08002268 if (!mdwc->in_host_mode && (!mdwc->vbus_active || mdwc->in_restart)) {
Mayank Rana511f3b22016-08-02 12:00:11 -07002269 mdwc->lpm_flags |= MDWC3_POWER_COLLAPSE;
2270 dev_dbg(mdwc->dev, "%s: power collapse\n", __func__);
2271 dwc3_msm_config_gdsc(mdwc, 0);
2272 clk_disable_unprepare(mdwc->sleep_clk);
Jack Phambbe27962017-03-23 18:42:26 -07002273
Jack Pham9faa51df2017-04-03 18:13:40 -07002274 if (mdwc->iommu_map) {
Jack Phambbe27962017-03-23 18:42:26 -07002275 arm_iommu_detach_device(mdwc->dev);
Jack Pham9faa51df2017-04-03 18:13:40 -07002276 dev_dbg(mdwc->dev, "IOMMU detached\n");
2277 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002278 }
2279
2280 /* Remove bus voting */
2281 if (mdwc->bus_perf_client) {
Mayank Ranaca9f3182017-04-13 17:44:14 -07002282 dbg_event(0xFF, "bus_devote_start", 0);
2283 ret = msm_bus_scale_client_update_request(
2284 mdwc->bus_perf_client, 0);
2285 dbg_event(0xFF, "bus_devote_finish", 0);
2286 if (ret)
2287 dev_err(mdwc->dev, "bus bw unvoting failed %d\n", ret);
Mayank Rana511f3b22016-08-02 12:00:11 -07002288 }
2289
2290 /*
2291	 * Release the wakeup source with a timeout to defer system suspend.
2292	 * This handles the case where, on USB cable disconnect, both a
2293	 * SUSPEND and a DISCONNECT event are received.
2294 */
2295 if (mdwc->lpm_to_suspend_delay) {
2296 dev_dbg(mdwc->dev, "defer suspend with %d(msecs)\n",
2297 mdwc->lpm_to_suspend_delay);
2298 pm_wakeup_event(mdwc->dev, mdwc->lpm_to_suspend_delay);
2299 } else {
2300 pm_relax(mdwc->dev);
2301 }
2302
2303 atomic_set(&dwc->in_lpm, 1);
2304
2305 /*
2306	 * With DCP or during cable disconnect we don't require wakeup
2307	 * using HS_PHY_IRQ or SS_PHY_IRQ. Hence enable wakeup only in
2308	 * the host bus suspend and device bus suspend cases.
2309 */
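	/*
	 * use_pdc_interrupts selects between the PDC-routed wakeup
	 * interrupts and the legacy HS/SS PHY IRQ lines.
	 */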
2310 if (mdwc->vbus_active || mdwc->in_host_mode) {
Mayank Ranad339abe2017-05-31 09:19:49 -07002311 if (mdwc->use_pdc_interrupts) {
2312 enable_usb_pdc_interrupt(mdwc, true);
2313 } else {
2314 uirq = &mdwc->wakeup_irq[HS_PHY_IRQ];
2315 configure_nonpdc_usb_interrupt(mdwc, uirq, true);
2316 uirq = &mdwc->wakeup_irq[SS_PHY_IRQ];
2317 configure_nonpdc_usb_interrupt(mdwc, uirq, true);
Mayank Rana511f3b22016-08-02 12:00:11 -07002318 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002319 mdwc->lpm_flags |= MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
2320 }
2321
2322 dev_info(mdwc->dev, "DWC3 in low power mode\n");
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +05302323 mutex_unlock(&mdwc->suspend_resume_mutex);
Mayank Rana511f3b22016-08-02 12:00:11 -07002324 return 0;
2325}
2326
2327static int dwc3_msm_resume(struct dwc3_msm *mdwc)
2328{
2329 int ret;
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08002330 long core_clk_rate;
Mayank Rana511f3b22016-08-02 12:00:11 -07002331 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Ranad339abe2017-05-31 09:19:49 -07002332 struct usb_irq *uirq;
Mayank Rana511f3b22016-08-02 12:00:11 -07002333
2334 dev_dbg(mdwc->dev, "%s: exiting lpm\n", __func__);
2335
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +05302336 mutex_lock(&mdwc->suspend_resume_mutex);
Mayank Rana511f3b22016-08-02 12:00:11 -07002337 if (!atomic_read(&dwc->in_lpm)) {
2338 dev_dbg(mdwc->dev, "%s: Already resumed\n", __func__);
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +05302339 mutex_unlock(&mdwc->suspend_resume_mutex);
Mayank Rana511f3b22016-08-02 12:00:11 -07002340 return 0;
2341 }
2342
2343 pm_stay_awake(mdwc->dev);
2344
2345 /* Enable bus voting */
2346 if (mdwc->bus_perf_client) {
Mayank Ranaca9f3182017-04-13 17:44:14 -07002347 dbg_event(0xFF, "bus_vote_start", 1);
2348 ret = msm_bus_scale_client_update_request(
2349 mdwc->bus_perf_client, 1);
2350 dbg_event(0xFF, "bus_vote_finish", 1);
2351 if (ret)
2352 dev_err(mdwc->dev, "bus bw voting failed %d\n", ret);
Mayank Rana511f3b22016-08-02 12:00:11 -07002353 }
2354
2355 /* Vote for TCXO while waking up USB HSPHY */
2356 ret = clk_prepare_enable(mdwc->xo_clk);
2357 if (ret)
2358 dev_err(mdwc->dev, "%s failed to vote TCXO buffer%d\n",
2359 __func__, ret);
2360
2361 /* Restore controller power collapse */
2362 if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
2363 dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
2364 dwc3_msm_config_gdsc(mdwc, 1);
Amit Nischal4d278212016-06-06 17:54:34 +05302365 ret = reset_control_assert(mdwc->core_reset);
2366 if (ret)
2367 dev_err(mdwc->dev, "%s:core_reset assert failed\n",
2368 __func__);
Mayank Rana511f3b22016-08-02 12:00:11 -07002369 /* HW requires a short delay for reset to take place properly */
2370 usleep_range(1000, 1200);
Amit Nischal4d278212016-06-06 17:54:34 +05302371 ret = reset_control_deassert(mdwc->core_reset);
2372 if (ret)
2373 dev_err(mdwc->dev, "%s:core_reset deassert failed\n",
2374 __func__);
Mayank Rana511f3b22016-08-02 12:00:11 -07002375 clk_prepare_enable(mdwc->sleep_clk);
2376 }
2377
2378 /*
2379 * Enable clocks
2380	 * Turn ON iface_clk before core_clk due to the FSM dependency.
2381 */
2382 clk_prepare_enable(mdwc->iface_clk);
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05302383 if (mdwc->noc_aggr_clk)
2384 clk_prepare_enable(mdwc->noc_aggr_clk);
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08002385
2386 core_clk_rate = mdwc->core_clk_rate;
2387 if (mdwc->in_host_mode && mdwc->max_rh_port_speed == USB_SPEED_HIGH) {
2388 core_clk_rate = mdwc->core_clk_rate_hs;
2389 dev_dbg(mdwc->dev, "%s: set hs core clk rate %ld\n", __func__,
2390 core_clk_rate);
2391 }
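	/*
	 * The lower "hs" core clock rate is used when the root-hub port is
	 * only running at high speed.
	 */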
2392
2393 clk_set_rate(mdwc->core_clk, core_clk_rate);
Mayank Rana511f3b22016-08-02 12:00:11 -07002394 clk_prepare_enable(mdwc->core_clk);
Hemant Kumar5fa38932016-10-27 11:58:37 -07002395
2396 /* set Memory core: ON, Memory periphery: ON */
2397 clk_set_flags(mdwc->core_clk, CLKFLAG_RETAIN_MEM);
2398 clk_set_flags(mdwc->core_clk, CLKFLAG_RETAIN_PERIPH);
2399
Mayank Rana511f3b22016-08-02 12:00:11 -07002400 clk_prepare_enable(mdwc->utmi_clk);
2401 if (mdwc->bus_aggr_clk)
2402 clk_prepare_enable(mdwc->bus_aggr_clk);
2403
2404 /* Resume SS PHY */
Hemant Kumarde1df692016-04-26 19:36:48 -07002405 if (dwc->maximum_speed == USB_SPEED_SUPER &&
2406 mdwc->lpm_flags & MDWC3_SS_PHY_SUSPEND) {
Mayank Rana511f3b22016-08-02 12:00:11 -07002407 mdwc->ss_phy->flags &= ~(PHY_LANE_A | PHY_LANE_B);
2408 if (mdwc->typec_orientation == ORIENTATION_CC1)
2409 mdwc->ss_phy->flags |= PHY_LANE_A;
2410 if (mdwc->typec_orientation == ORIENTATION_CC2)
2411 mdwc->ss_phy->flags |= PHY_LANE_B;
2412 usb_phy_set_suspend(mdwc->ss_phy, 0);
2413 mdwc->ss_phy->flags &= ~DEVICE_IN_SS_MODE;
2414 mdwc->lpm_flags &= ~MDWC3_SS_PHY_SUSPEND;
2415 }
2416
2417 mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
2418 /* Resume HS PHY */
2419 usb_phy_set_suspend(mdwc->hs_phy, 0);
2420
2421 /* Recover from controller power collapse */
2422 if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
2423 u32 tmp;
2424
Jack Pham9faa51df2017-04-03 18:13:40 -07002425 if (mdwc->iommu_map) {
2426 ret = arm_iommu_attach_device(mdwc->dev,
2427 mdwc->iommu_map);
2428 if (ret)
2429 dev_err(mdwc->dev, "IOMMU attach failed (%d)\n",
2430 ret);
2431 else
2432 dev_dbg(mdwc->dev, "attached to IOMMU\n");
2433 }
2434
Mayank Rana511f3b22016-08-02 12:00:11 -07002435 dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
2436
2437 dwc3_msm_power_collapse_por(mdwc);
2438
2439 /* Get initial P3 status and enable IN_P3 event */
2440 tmp = dwc3_msm_read_reg_field(mdwc->base,
2441 DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
2442 atomic_set(&mdwc->in_p3, tmp == DWC3_LINK_STATE_U3);
2443 dwc3_msm_write_reg_field(mdwc->base, PWR_EVNT_IRQ_MASK_REG,
2444 PWR_EVNT_POWERDOWN_IN_P3_MASK, 1);
2445
2446 mdwc->lpm_flags &= ~MDWC3_POWER_COLLAPSE;
2447 }
2448
2449 atomic_set(&dwc->in_lpm, 0);
2450
Vijayavardhan Vennapusa6a4c1d92016-12-08 13:06:26 +05302451 /* enable power evt irq for IN P3 detection */
Mayank Ranad339abe2017-05-31 09:19:49 -07002452 enable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
Vijayavardhan Vennapusa6a4c1d92016-12-08 13:06:26 +05302453
Mayank Rana511f3b22016-08-02 12:00:11 -07002454 /* Disable HSPHY auto suspend */
2455 dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
2456 dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0)) &
2457 ~(DWC3_GUSB2PHYCFG_ENBLSLPM |
2458 DWC3_GUSB2PHYCFG_SUSPHY));
2459
2460 /* Disable wakeup capable for HS_PHY IRQ & SS_PHY_IRQ if enabled */
2461 if (mdwc->lpm_flags & MDWC3_ASYNC_IRQ_WAKE_CAPABILITY) {
Mayank Ranad339abe2017-05-31 09:19:49 -07002462 if (mdwc->use_pdc_interrupts) {
2463 enable_usb_pdc_interrupt(mdwc, false);
2464 } else {
2465 uirq = &mdwc->wakeup_irq[HS_PHY_IRQ];
2466 configure_nonpdc_usb_interrupt(mdwc, uirq, false);
2467 uirq = &mdwc->wakeup_irq[SS_PHY_IRQ];
2468 configure_nonpdc_usb_interrupt(mdwc, uirq, false);
Mayank Rana511f3b22016-08-02 12:00:11 -07002469 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002470 mdwc->lpm_flags &= ~MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
2471 }
2472
2473 dev_info(mdwc->dev, "DWC3 exited from low power mode\n");
2474
Mayank Rana511f3b22016-08-02 12:00:11 -07002475 /* Enable core irq */
2476 if (dwc->irq)
2477 enable_irq(dwc->irq);
2478
2479 /*
2480 * Handle other power events that could not have been handled during
2481 * Low Power Mode
2482 */
2483 dwc3_pwr_event_handler(mdwc);
2484
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05302485 if (pm_qos_request_active(&mdwc->pm_qos_req_dma))
2486 schedule_delayed_work(&mdwc->perf_vote_work,
2487 msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
2488
Mayank Rana08e41922017-03-02 15:25:48 -08002489 dbg_event(0xFF, "Ctl Res", atomic_read(&dwc->in_lpm));
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +05302490 mutex_unlock(&mdwc->suspend_resume_mutex);
2491
Mayank Rana511f3b22016-08-02 12:00:11 -07002492 return 0;
2493}
2494
2495/**
2496 * dwc3_ext_event_notify - callback to handle events from external transceiver
2497 *
2498 * Updates the OTG inputs from the current cable state and kicks the
2499 * OTG state machine.
2499 */
2500static void dwc3_ext_event_notify(struct dwc3_msm *mdwc)
2501{
2502 /* Flush processing any pending events before handling new ones */
2503 flush_delayed_work(&mdwc->sm_work);
2504
2505 if (mdwc->id_state == DWC3_ID_FLOAT) {
2506 dev_dbg(mdwc->dev, "XCVR: ID set\n");
2507 set_bit(ID, &mdwc->inputs);
2508 } else {
2509 dev_dbg(mdwc->dev, "XCVR: ID clear\n");
2510 clear_bit(ID, &mdwc->inputs);
2511 }
2512
2513 if (mdwc->vbus_active && !mdwc->in_restart) {
2514 dev_dbg(mdwc->dev, "XCVR: BSV set\n");
2515 set_bit(B_SESS_VLD, &mdwc->inputs);
2516 } else {
2517 dev_dbg(mdwc->dev, "XCVR: BSV clear\n");
2518 clear_bit(B_SESS_VLD, &mdwc->inputs);
2519 }
2520
2521 if (mdwc->suspend) {
2522 dev_dbg(mdwc->dev, "XCVR: SUSP set\n");
2523 set_bit(B_SUSPEND, &mdwc->inputs);
2524 } else {
2525 dev_dbg(mdwc->dev, "XCVR: SUSP clear\n");
2526 clear_bit(B_SUSPEND, &mdwc->inputs);
2527 }
2528
2529 schedule_delayed_work(&mdwc->sm_work, 0);
2530}
2531
2532static void dwc3_resume_work(struct work_struct *w)
2533{
2534 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, resume_work);
Mayank Rana08e41922017-03-02 15:25:48 -08002535 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Jack Pham4e9dff72017-04-04 18:05:53 -07002536 union extcon_property_value val;
2537 unsigned int extcon_id;
2538 struct extcon_dev *edev = NULL;
2539 int ret = 0;
Mayank Rana511f3b22016-08-02 12:00:11 -07002540
2541 dev_dbg(mdwc->dev, "%s: dwc3 resume work\n", __func__);
2542
Jack Pham4e9dff72017-04-04 18:05:53 -07002543 if (mdwc->vbus_active) {
2544 edev = mdwc->extcon_vbus;
2545 extcon_id = EXTCON_USB;
2546 } else if (mdwc->id_state == DWC3_ID_GROUND) {
2547 edev = mdwc->extcon_id;
2548 extcon_id = EXTCON_USB_HOST;
2549 }
2550
2551 /* Check speed and Type-C polarity values in order to configure PHY */
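	/*
	 * EXTCON_PROP_USB_SS indicates whether the attached partner can run
	 * at SuperSpeed; EXTCON_PROP_USB_TYPEC_POLARITY reports the CC
	 * orientation used to pick the SS PHY lane (CC1 vs CC2).
	 */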
2552 if (edev && extcon_get_state(edev, extcon_id)) {
2553 ret = extcon_get_property(edev, extcon_id,
2554 EXTCON_PROP_USB_SS, &val);
2555
2556 /* Use default dwc->maximum_speed if speed isn't reported */
2557 if (!ret)
2558 dwc->maximum_speed = (val.intval == 0) ?
2559 USB_SPEED_HIGH : USB_SPEED_SUPER;
2560
2561 if (dwc->maximum_speed > dwc->max_hw_supp_speed)
2562 dwc->maximum_speed = dwc->max_hw_supp_speed;
2563
Mayank Ranaf70d8212017-06-12 14:02:07 -07002564 if (override_usb_speed &&
2565 is_valid_usb_speed(dwc, override_usb_speed)) {
2566 dwc->maximum_speed = override_usb_speed;
2567 dbg_event(0xFF, "override_speed", override_usb_speed);
2568 }
2569
Jack Pham4e9dff72017-04-04 18:05:53 -07002570 dbg_event(0xFF, "speed", dwc->maximum_speed);
2571
2572 ret = extcon_get_property(edev, extcon_id,
2573 EXTCON_PROP_USB_TYPEC_POLARITY, &val);
2574 if (ret)
2575 mdwc->typec_orientation = ORIENTATION_NONE;
2576 else
2577 mdwc->typec_orientation = val.intval ?
2578 ORIENTATION_CC2 : ORIENTATION_CC1;
2579
2580 dbg_event(0xFF, "cc_state", mdwc->typec_orientation);
2581 }
2582
Mayank Rana511f3b22016-08-02 12:00:11 -07002583 /*
2584	 * Exit LPM first to meet the device-side resume timing requirements.
2585	 * The resume_pending flag prevents calling dwc3_msm_resume() when we
2586	 * get here due to a system-wide resume without a USB cable
2587	 * connected. The flag is set only by the power event IRQ while the
2588	 * controller is in LPM.
2589 */
2590 if (mdwc->resume_pending) {
2591 dwc3_msm_resume(mdwc);
2592 mdwc->resume_pending = false;
2593 }
2594
Mayank Rana08e41922017-03-02 15:25:48 -08002595 if (atomic_read(&mdwc->pm_suspended)) {
2596 dbg_event(0xFF, "RWrk PMSus", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07002597 /* let pm resume kick in resume work later */
2598 return;
Mayank Rana08e41922017-03-02 15:25:48 -08002599 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002600 dwc3_ext_event_notify(mdwc);
2601}
2602
2603static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc)
2604{
2605 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2606 u32 irq_stat, irq_clear = 0;
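	/*
	 * Bits left in irq_stat are still to be interpreted; bits moved into
	 * irq_clear are acknowledged back to PWR_EVNT_IRQ_STAT_REG at the end.
	 */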
2607
2608 irq_stat = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
2609 dev_dbg(mdwc->dev, "%s irq_stat=%X\n", __func__, irq_stat);
2610
2611 /* Check for P3 events */
2612 if ((irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) &&
2613 (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK)) {
2614 /* Can't tell if entered or exit P3, so check LINKSTATE */
2615 u32 ls = dwc3_msm_read_reg_field(mdwc->base,
2616 DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
2617 dev_dbg(mdwc->dev, "%s link state = 0x%04x\n", __func__, ls);
2618 atomic_set(&mdwc->in_p3, ls == DWC3_LINK_STATE_U3);
2619
2620 irq_stat &= ~(PWR_EVNT_POWERDOWN_OUT_P3_MASK |
2621 PWR_EVNT_POWERDOWN_IN_P3_MASK);
2622 irq_clear |= (PWR_EVNT_POWERDOWN_OUT_P3_MASK |
2623 PWR_EVNT_POWERDOWN_IN_P3_MASK);
2624 } else if (irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) {
2625 atomic_set(&mdwc->in_p3, 0);
2626 irq_stat &= ~PWR_EVNT_POWERDOWN_OUT_P3_MASK;
2627 irq_clear |= PWR_EVNT_POWERDOWN_OUT_P3_MASK;
2628 } else if (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK) {
2629 atomic_set(&mdwc->in_p3, 1);
2630 irq_stat &= ~PWR_EVNT_POWERDOWN_IN_P3_MASK;
2631 irq_clear |= PWR_EVNT_POWERDOWN_IN_P3_MASK;
2632 }
2633
2634 /* Clear L2 exit */
2635 if (irq_stat & PWR_EVNT_LPM_OUT_L2_MASK) {
2636 irq_stat &= ~PWR_EVNT_LPM_OUT_L2_MASK;
2637		irq_clear |= PWR_EVNT_LPM_OUT_L2_MASK;
2638 }
2639
2640 /* Handle exit from L1 events */
2641 if (irq_stat & PWR_EVNT_LPM_OUT_L1_MASK) {
2642 dev_dbg(mdwc->dev, "%s: handling PWR_EVNT_LPM_OUT_L1_MASK\n",
2643 __func__);
2644 if (usb_gadget_wakeup(&dwc->gadget))
2645 dev_err(mdwc->dev, "%s failed to take dwc out of L1\n",
2646 __func__);
2647 irq_stat &= ~PWR_EVNT_LPM_OUT_L1_MASK;
2648 irq_clear |= PWR_EVNT_LPM_OUT_L1_MASK;
2649 }
2650
2651 /* Unhandled events */
2652 if (irq_stat)
2653 dev_dbg(mdwc->dev, "%s: unexpected PWR_EVNT, irq_stat=%X\n",
2654 __func__, irq_stat);
2655
2656 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG, irq_clear);
2657}
2658
2659static irqreturn_t msm_dwc3_pwr_irq_thread(int irq, void *_mdwc)
2660{
2661 struct dwc3_msm *mdwc = _mdwc;
2662 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2663
2664 dev_dbg(mdwc->dev, "%s\n", __func__);
2665
2666 if (atomic_read(&dwc->in_lpm))
2667 dwc3_resume_work(&mdwc->resume_work);
2668 else
2669 dwc3_pwr_event_handler(mdwc);
2670
Mayank Rana08e41922017-03-02 15:25:48 -08002671 dbg_event(0xFF, "PWR IRQ", atomic_read(&dwc->in_lpm));
Mayank Rana511f3b22016-08-02 12:00:11 -07002672 return IRQ_HANDLED;
2673}
2674
2675static irqreturn_t msm_dwc3_pwr_irq(int irq, void *data)
2676{
2677 struct dwc3_msm *mdwc = data;
2678 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2679
2680 dwc->t_pwr_evt_irq = ktime_get();
2681 dev_dbg(mdwc->dev, "%s received\n", __func__);
2682 /*
2683	 * When in Low Power Mode we can't read PWR_EVNT_IRQ_STAT_REG to ascertain
2684	 * which interrupts have been triggered, as the clocks are disabled.
2685	 * Resume the controller by waking up the pwr event irq thread. After re-enabling
2686 * clocks, dwc3_msm_resume will call dwc3_pwr_event_handler to handle
2687 * all other power events.
2688 */
2689 if (atomic_read(&dwc->in_lpm)) {
2690 /* set this to call dwc3_msm_resume() */
2691 mdwc->resume_pending = true;
2692 return IRQ_WAKE_THREAD;
2693 }
2694
2695 dwc3_pwr_event_handler(mdwc);
2696 return IRQ_HANDLED;
2697}
2698
2699static int dwc3_cpu_notifier_cb(struct notifier_block *nfb,
2700 unsigned long action, void *hcpu)
2701{
2702 uint32_t cpu = (uintptr_t)hcpu;
2703 struct dwc3_msm *mdwc =
2704 container_of(nfb, struct dwc3_msm, dwc3_cpu_notifier);
2705
2706 if (cpu == cpu_to_affin && action == CPU_ONLINE) {
2707 pr_debug("%s: cpu online:%u irq:%d\n", __func__,
2708 cpu_to_affin, mdwc->irq_to_affin);
2709 irq_set_affinity(mdwc->irq_to_affin, get_cpu_mask(cpu));
2710 }
2711
2712 return NOTIFY_OK;
2713}
2714
2715static void dwc3_otg_sm_work(struct work_struct *w);
2716
2717static int dwc3_msm_get_clk_gdsc(struct dwc3_msm *mdwc)
2718{
2719 int ret;
2720
2721 mdwc->dwc3_gdsc = devm_regulator_get(mdwc->dev, "USB3_GDSC");
2722 if (IS_ERR(mdwc->dwc3_gdsc))
2723 mdwc->dwc3_gdsc = NULL;
2724
2725 mdwc->xo_clk = devm_clk_get(mdwc->dev, "xo");
2726 if (IS_ERR(mdwc->xo_clk)) {
2727 dev_err(mdwc->dev, "%s unable to get TCXO buffer handle\n",
2728 __func__);
2729 ret = PTR_ERR(mdwc->xo_clk);
2730 return ret;
2731 }
2732 clk_set_rate(mdwc->xo_clk, 19200000);
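	/* The TCXO buffer clock runs at 19.2 MHz on these platforms */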
2733
2734 mdwc->iface_clk = devm_clk_get(mdwc->dev, "iface_clk");
2735 if (IS_ERR(mdwc->iface_clk)) {
2736 dev_err(mdwc->dev, "failed to get iface_clk\n");
2737 ret = PTR_ERR(mdwc->iface_clk);
2738 return ret;
2739 }
2740
2741 /*
2742 * DWC3 Core requires its CORE CLK (aka master / bus clk) to
2743	 * run at 125 MHz in SSUSB mode and above 60 MHz in HSUSB mode.
2744	 * On newer platforms it can run at 150 MHz as well.
2745 */
2746 mdwc->core_clk = devm_clk_get(mdwc->dev, "core_clk");
2747 if (IS_ERR(mdwc->core_clk)) {
2748 dev_err(mdwc->dev, "failed to get core_clk\n");
2749 ret = PTR_ERR(mdwc->core_clk);
2750 return ret;
2751 }
2752
Amit Nischal4d278212016-06-06 17:54:34 +05302753 mdwc->core_reset = devm_reset_control_get(mdwc->dev, "core_reset");
2754 if (IS_ERR(mdwc->core_reset)) {
2755 dev_err(mdwc->dev, "failed to get core_reset\n");
2756 return PTR_ERR(mdwc->core_reset);
2757 }
2758
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05302759 if (of_property_read_u32(mdwc->dev->of_node, "qcom,core-clk-rate",
Vijayavardhan Vennapusa3e668f32016-01-08 15:58:35 +05302760 (u32 *)&mdwc->core_clk_rate)) {
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05302761 dev_err(mdwc->dev, "USB core-clk-rate is not present\n");
2762 return -EINVAL;
Vijayavardhan Vennapusa3e668f32016-01-08 15:58:35 +05302763 }
2764
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05302765 mdwc->core_clk_rate = clk_round_rate(mdwc->core_clk,
Vijayavardhan Vennapusa3e668f32016-01-08 15:58:35 +05302766 mdwc->core_clk_rate);
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05302767 dev_dbg(mdwc->dev, "USB core frequency = %ld\n",
2768 mdwc->core_clk_rate);
2769 ret = clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
2770 if (ret)
2771 dev_err(mdwc->dev, "fail to set core_clk freq:%d\n", ret);
Mayank Rana511f3b22016-08-02 12:00:11 -07002772
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08002773 if (of_property_read_u32(mdwc->dev->of_node, "qcom,core-clk-rate-hs",
2774 (u32 *)&mdwc->core_clk_rate_hs)) {
2775 dev_dbg(mdwc->dev, "USB core-clk-rate-hs is not present\n");
2776 mdwc->core_clk_rate_hs = mdwc->core_clk_rate;
2777 }
2778
Mayank Rana511f3b22016-08-02 12:00:11 -07002779 mdwc->sleep_clk = devm_clk_get(mdwc->dev, "sleep_clk");
2780 if (IS_ERR(mdwc->sleep_clk)) {
2781 dev_err(mdwc->dev, "failed to get sleep_clk\n");
2782 ret = PTR_ERR(mdwc->sleep_clk);
2783 return ret;
2784 }
2785
2786 clk_set_rate(mdwc->sleep_clk, 32000);
2787 mdwc->utmi_clk_rate = 19200000;
2788 mdwc->utmi_clk = devm_clk_get(mdwc->dev, "utmi_clk");
2789 if (IS_ERR(mdwc->utmi_clk)) {
2790 dev_err(mdwc->dev, "failed to get utmi_clk\n");
2791 ret = PTR_ERR(mdwc->utmi_clk);
2792 return ret;
2793 }
2794
2795 clk_set_rate(mdwc->utmi_clk, mdwc->utmi_clk_rate);
2796 mdwc->bus_aggr_clk = devm_clk_get(mdwc->dev, "bus_aggr_clk");
2797 if (IS_ERR(mdwc->bus_aggr_clk))
2798 mdwc->bus_aggr_clk = NULL;
2799
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05302800 mdwc->noc_aggr_clk = devm_clk_get(mdwc->dev, "noc_aggr_clk");
2801 if (IS_ERR(mdwc->noc_aggr_clk))
2802 mdwc->noc_aggr_clk = NULL;
2803
Mayank Rana511f3b22016-08-02 12:00:11 -07002804 if (of_property_match_string(mdwc->dev->of_node,
2805 "clock-names", "cfg_ahb_clk") >= 0) {
2806 mdwc->cfg_ahb_clk = devm_clk_get(mdwc->dev, "cfg_ahb_clk");
2807 if (IS_ERR(mdwc->cfg_ahb_clk)) {
2808 ret = PTR_ERR(mdwc->cfg_ahb_clk);
2809 mdwc->cfg_ahb_clk = NULL;
2810 if (ret != -EPROBE_DEFER)
2811 dev_err(mdwc->dev,
2812 "failed to get cfg_ahb_clk ret %d\n",
2813 ret);
2814 return ret;
2815 }
2816 }
2817
2818 return 0;
2819}
2820
2821static int dwc3_msm_id_notifier(struct notifier_block *nb,
2822 unsigned long event, void *ptr)
2823{
2824 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, id_nb);
Hemant Kumarde1df692016-04-26 19:36:48 -07002825 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07002826 enum dwc3_id_state id;
Mayank Rana511f3b22016-08-02 12:00:11 -07002827
2828 id = event ? DWC3_ID_GROUND : DWC3_ID_FLOAT;
2829
2830 dev_dbg(mdwc->dev, "host:%ld (id:%d) event received\n", event, id);
2831
Mayank Rana511f3b22016-08-02 12:00:11 -07002832 if (mdwc->id_state != id) {
2833 mdwc->id_state = id;
Mayank Rana08e41922017-03-02 15:25:48 -08002834 dbg_event(0xFF, "id_state", mdwc->id_state);
Mayank Rana511f3b22016-08-02 12:00:11 -07002835 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
2836 }
2837
Mayank Rana511f3b22016-08-02 12:00:11 -07002838 return NOTIFY_DONE;
2839}
2840
Hemant Kumar006fae42017-07-12 18:11:25 -07002841
2842static void check_for_sdp_connection(struct work_struct *w)
2843{
Hemant Kumar006fae42017-07-12 18:11:25 -07002844 struct dwc3_msm *mdwc =
2845 container_of(w, struct dwc3_msm, sdp_check.work);
2846 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2847
2848 if (!mdwc->vbus_active)
2849 return;
2850
2851 /* floating D+/D- lines detected */
2852 if (dwc->gadget.state < USB_STATE_DEFAULT &&
2853 dwc3_gadget_get_link_state(dwc) != DWC3_LINK_STATE_CMPLY) {
Hemant Kumar006fae42017-07-12 18:11:25 -07002854 mdwc->vbus_active = 0;
2855 dbg_event(0xFF, "Q RW SPD CHK", mdwc->vbus_active);
2856 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
2857 }
2858}
2859
Mayank Rana511f3b22016-08-02 12:00:11 -07002860static int dwc3_msm_vbus_notifier(struct notifier_block *nb,
2861 unsigned long event, void *ptr)
2862{
2863 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, vbus_nb);
2864 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07002865
2866 dev_dbg(mdwc->dev, "vbus:%ld event received\n", event);
2867
2868 if (mdwc->vbus_active == event)
2869 return NOTIFY_DONE;
2870
Mayank Rana511f3b22016-08-02 12:00:11 -07002871 mdwc->vbus_active = event;
Mayank Rana83ad5822016-08-09 14:17:22 -07002872 if (dwc->is_drd && !mdwc->in_restart)
Mayank Rana511f3b22016-08-02 12:00:11 -07002873 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
Jack Pham4e9dff72017-04-04 18:05:53 -07002874
Mayank Rana511f3b22016-08-02 12:00:11 -07002875 return NOTIFY_DONE;
2876}
Jack Pham4e9dff72017-04-04 18:05:53 -07002877
Mayank Rana51958172017-02-28 14:49:21 -08002878/*
Mayank Rana25d02862017-09-12 14:49:41 -07002879 * Handle EUD based soft detach/attach event
Mayank Rana51958172017-02-28 14:49:21 -08002880 *
2881 * @nb - notifier handler
2882 * @event - event information i.e. soft detach/attach event
2883 * @ptr - extcon_dev pointer
2884 *
2885 * @return int - NOTIFY_DONE always due to EUD
2886 */
2887static int dwc3_msm_eud_notifier(struct notifier_block *nb,
2888 unsigned long event, void *ptr)
2889{
2890 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, eud_event_nb);
2891 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana51958172017-02-28 14:49:21 -08002892
2893 dbg_event(0xFF, "EUD_NB", event);
2894 dev_dbg(mdwc->dev, "eud:%ld event received\n", event);
2895 if (mdwc->vbus_active == event)
2896 return NOTIFY_DONE;
2897
Mayank Rana51958172017-02-28 14:49:21 -08002898 mdwc->vbus_active = event;
2899 if (dwc->is_drd && !mdwc->in_restart)
2900 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
Jack Pham4e9dff72017-04-04 18:05:53 -07002901
Mayank Rana51958172017-02-28 14:49:21 -08002902 return NOTIFY_DONE;
2903}
Mayank Rana511f3b22016-08-02 12:00:11 -07002904
Pratham Pratapd76a1782017-11-14 20:50:31 +05302905static int dwc3_msm_extcon_register(struct dwc3_msm *mdwc, int start_idx)
Mayank Rana511f3b22016-08-02 12:00:11 -07002906{
2907 struct device_node *node = mdwc->dev->of_node;
2908 struct extcon_dev *edev;
2909 int ret = 0;
2910
2911 if (!of_property_read_bool(node, "extcon"))
2912 return 0;
2913
Pratham Pratapd76a1782017-11-14 20:50:31 +05302914 /*
2915 * Use mandatory phandle (index 0 for type-C; index 3 for microUSB)
2916 * for USB vbus status notification
2917 */
2918 edev = extcon_get_edev_by_phandle(mdwc->dev, start_idx);
Mayank Rana511f3b22016-08-02 12:00:11 -07002919 if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV)
2920 return PTR_ERR(edev);
2921
2922 if (!IS_ERR(edev)) {
2923 mdwc->extcon_vbus = edev;
2924 mdwc->vbus_nb.notifier_call = dwc3_msm_vbus_notifier;
2925 ret = extcon_register_notifier(edev, EXTCON_USB,
2926 &mdwc->vbus_nb);
2927 if (ret < 0) {
2928 dev_err(mdwc->dev, "failed to register notifier for USB\n");
2929 return ret;
2930 }
2931 }
2932
Pratham Pratapd76a1782017-11-14 20:50:31 +05302933 /*
2934 * Use optional phandle (index 1 for type-C; index 4 for microUSB)
2935 * for USB ID status notification
2936 */
2937 if (of_count_phandle_with_args(node, "extcon", NULL) > start_idx + 1) {
2938 edev = extcon_get_edev_by_phandle(mdwc->dev, start_idx + 1);
Mayank Rana511f3b22016-08-02 12:00:11 -07002939 if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) {
2940 ret = PTR_ERR(edev);
2941 goto err;
2942 }
2943 }
2944
2945 if (!IS_ERR(edev)) {
2946 mdwc->extcon_id = edev;
2947 mdwc->id_nb.notifier_call = dwc3_msm_id_notifier;
Mayank Rana54d60432017-07-18 12:10:04 -07002948 mdwc->host_restart_nb.notifier_call =
2949 dwc3_restart_usb_host_mode;
Mayank Rana511f3b22016-08-02 12:00:11 -07002950 ret = extcon_register_notifier(edev, EXTCON_USB_HOST,
2951 &mdwc->id_nb);
2952 if (ret < 0) {
2953 dev_err(mdwc->dev, "failed to register notifier for USB-HOST\n");
2954 goto err;
2955 }
Mayank Rana54d60432017-07-18 12:10:04 -07002956
2957 ret = extcon_register_blocking_notifier(edev, EXTCON_USB_HOST,
2958 &mdwc->host_restart_nb);
2959 if (ret < 0) {
2960 dev_err(mdwc->dev, "failed to register blocking notifier\n");
2961 goto err1;
2962 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002963 }
2964
Mayank Rana81bd2e52017-07-26 16:15:15 -07002965 edev = NULL;
Pratham Pratapd76a1782017-11-14 20:50:31 +05302966 /* Use optional phandle (index 2) for EUD based detach/attach events */
Mayank Rana51958172017-02-28 14:49:21 -08002967 if (of_count_phandle_with_args(node, "extcon", NULL) > 2) {
2968 edev = extcon_get_edev_by_phandle(mdwc->dev, 2);
2969 if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) {
2970 ret = PTR_ERR(edev);
Pratham Pratapd76a1782017-11-14 20:50:31 +05302971 goto err2;
Mayank Rana51958172017-02-28 14:49:21 -08002972 }
2973 }
2974
Mayank Rana81bd2e52017-07-26 16:15:15 -07002975 if (!IS_ERR_OR_NULL(edev)) {
Mayank Rana51958172017-02-28 14:49:21 -08002976 mdwc->extcon_eud = edev;
2977 mdwc->eud_event_nb.notifier_call = dwc3_msm_eud_notifier;
2978 ret = extcon_register_notifier(edev, EXTCON_USB,
2979 &mdwc->eud_event_nb);
2980 if (ret < 0) {
2981 dev_err(mdwc->dev, "failed to register notifier for EUD-USB\n");
Mayank Rana54d60432017-07-18 12:10:04 -07002982 goto err2;
Mayank Rana51958172017-02-28 14:49:21 -08002983 }
2984 }
2985
Mayank Rana511f3b22016-08-02 12:00:11 -07002986 return 0;
Mayank Rana54d60432017-07-18 12:10:04 -07002987err2:
2988 if (mdwc->extcon_id)
2989 extcon_unregister_blocking_notifier(mdwc->extcon_id,
2990 EXTCON_USB_HOST, &mdwc->host_restart_nb);
Mayank Rana51958172017-02-28 14:49:21 -08002991err1:
2992 if (mdwc->extcon_id)
2993 extcon_unregister_notifier(mdwc->extcon_id, EXTCON_USB_HOST,
2994 &mdwc->id_nb);
Mayank Rana511f3b22016-08-02 12:00:11 -07002995err:
2996 if (mdwc->extcon_vbus)
2997 extcon_unregister_notifier(mdwc->extcon_vbus, EXTCON_USB,
2998 &mdwc->vbus_nb);
2999 return ret;
3000}
3001
Mayank Rana00d6f722017-09-18 17:22:03 -07003002#define SMMU_BASE 0x60000000 /* Device address range base */
3003#define SMMU_SIZE 0x90000000 /* Device address range size */
Jack Phambbe27962017-03-23 18:42:26 -07003004
3005static int dwc3_msm_init_iommu(struct dwc3_msm *mdwc)
3006{
3007 struct device_node *node = mdwc->dev->of_node;
Jack Pham283cece2017-04-05 09:58:17 -07003008 int atomic_ctx = 1, s1_bypass;
Jack Phambbe27962017-03-23 18:42:26 -07003009 int ret;
3010
3011 if (!of_property_read_bool(node, "iommus"))
3012 return 0;
3013
3014 mdwc->iommu_map = arm_iommu_create_mapping(&platform_bus_type,
3015 SMMU_BASE, SMMU_SIZE);
3016 if (IS_ERR_OR_NULL(mdwc->iommu_map)) {
3017 ret = PTR_ERR(mdwc->iommu_map) ?: -ENODEV;
3018 dev_err(mdwc->dev, "Failed to create IOMMU mapping (%d)\n",
3019 ret);
3020 return ret;
3021 }
3022 dev_dbg(mdwc->dev, "IOMMU mapping created: %pK\n", mdwc->iommu_map);
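	/*
	 * Configure the mapping attributes (upstream IOVA allocator, atomic
	 * context, optional S1 bypass) before attaching; USB DMA then goes
	 * through this IOVA window.
	 */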
Mayank Rana377ddf42017-09-05 15:09:12 -07003023 ret = iommu_domain_set_attr(mdwc->iommu_map->domain,
3024 DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR, &atomic_ctx);
3025 if (ret) {
3026 dev_err(mdwc->dev, "set UPSTREAM_IOVA_ALLOCATOR failed(%d)\n",
3027 ret);
3028 goto release_mapping;
3029 }
Jack Phambbe27962017-03-23 18:42:26 -07003030
3031 ret = iommu_domain_set_attr(mdwc->iommu_map->domain, DOMAIN_ATTR_ATOMIC,
3032 &atomic_ctx);
3033 if (ret) {
3034 dev_err(mdwc->dev, "IOMMU set atomic attribute failed (%d)\n",
3035 ret);
Jack Pham9faa51df2017-04-03 18:13:40 -07003036 goto release_mapping;
Jack Phambbe27962017-03-23 18:42:26 -07003037 }
3038
Jack Pham283cece2017-04-05 09:58:17 -07003039 s1_bypass = of_property_read_bool(node, "qcom,smmu-s1-bypass");
3040 ret = iommu_domain_set_attr(mdwc->iommu_map->domain,
3041 DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
3042 if (ret) {
3043 dev_err(mdwc->dev, "IOMMU set s1 bypass (%d) failed (%d)\n",
3044 s1_bypass, ret);
3045 goto release_mapping;
3046 }
3047
Jack Pham9faa51df2017-04-03 18:13:40 -07003048 ret = arm_iommu_attach_device(mdwc->dev, mdwc->iommu_map);
3049 if (ret) {
3050 dev_err(mdwc->dev, "IOMMU attach failed (%d)\n", ret);
3051 goto release_mapping;
3052 }
3053 dev_dbg(mdwc->dev, "attached to IOMMU\n");
3054
Jack Phambbe27962017-03-23 18:42:26 -07003055 return 0;
Jack Pham9faa51df2017-04-03 18:13:40 -07003056
3057release_mapping:
3058 arm_iommu_release_mapping(mdwc->iommu_map);
3059 mdwc->iommu_map = NULL;
3060 return ret;
Jack Phambbe27962017-03-23 18:42:26 -07003061}
3062
Mayank Rana511f3b22016-08-02 12:00:11 -07003063static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
3064 char *buf)
3065{
3066 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3067
3068 if (mdwc->vbus_active)
3069 return snprintf(buf, PAGE_SIZE, "peripheral\n");
3070 if (mdwc->id_state == DWC3_ID_GROUND)
3071 return snprintf(buf, PAGE_SIZE, "host\n");
3072
3073 return snprintf(buf, PAGE_SIZE, "none\n");
3074}
3075
3076static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
3077 const char *buf, size_t count)
3078{
3079 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3080
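	/*
	 * Writing "peripheral" simulates VBUS being present, "host" simulates
	 * ID ground, and any other value reports a disconnect; the new state
	 * is then fed to the OTG state machine below.
	 */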
3081 if (sysfs_streq(buf, "peripheral")) {
3082 mdwc->vbus_active = true;
3083 mdwc->id_state = DWC3_ID_FLOAT;
3084 } else if (sysfs_streq(buf, "host")) {
3085 mdwc->vbus_active = false;
3086 mdwc->id_state = DWC3_ID_GROUND;
3087 } else {
3088 mdwc->vbus_active = false;
3089 mdwc->id_state = DWC3_ID_FLOAT;
3090 }
3091
3092 dwc3_ext_event_notify(mdwc);
3093
3094 return count;
3095}
3096
3097static DEVICE_ATTR_RW(mode);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303098static void msm_dwc3_perf_vote_work(struct work_struct *w);
Mayank Rana511f3b22016-08-02 12:00:11 -07003099
Vamsi Krishna Samavedam17f26db2017-01-31 17:21:23 -08003100static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
3101 char *buf)
3102{
3103 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3104 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3105
3106 return snprintf(buf, PAGE_SIZE, "%s\n",
3107 usb_speed_string(dwc->max_hw_supp_speed));
3108}
3109
3110static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
3111 const char *buf, size_t count)
3112{
3113 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3114 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3115 enum usb_device_speed req_speed = USB_SPEED_UNKNOWN;
3116
3117 if (sysfs_streq(buf, "high"))
3118 req_speed = USB_SPEED_HIGH;
3119 else if (sysfs_streq(buf, "super"))
3120 req_speed = USB_SPEED_SUPER;
3121
3122 if (req_speed != USB_SPEED_UNKNOWN &&
3123 req_speed != dwc->max_hw_supp_speed) {
3124 dwc->maximum_speed = dwc->max_hw_supp_speed = req_speed;
3125 schedule_work(&mdwc->restart_usb_work);
3126 }
3127
3128 return count;
3129}
3130static DEVICE_ATTR_RW(speed);
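/*
 * Usage sketch for the "speed" attribute (path is illustrative):
 *
 *	echo high > /sys/devices/platform/.../speed
 *
 * caps the controller at high-speed (or "super" for SuperSpeed) and
 * schedules restart_usb_work to apply the new limit.
 */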
3131
Mayank Rana511f3b22016-08-02 12:00:11 -07003132static int dwc3_msm_probe(struct platform_device *pdev)
3133{
3134 struct device_node *node = pdev->dev.of_node, *dwc3_node;
3135 struct device *dev = &pdev->dev;
Hemant Kumar8220a982017-01-19 18:11:34 -08003136 union power_supply_propval pval = {0};
Mayank Rana511f3b22016-08-02 12:00:11 -07003137 struct dwc3_msm *mdwc;
3138 struct dwc3 *dwc;
3139 struct resource *res;
3140 void __iomem *tcsr;
3141 bool host_mode;
Mayank Ranad339abe2017-05-31 09:19:49 -07003142 int ret = 0, i;
Mayank Rana511f3b22016-08-02 12:00:11 -07003143 int ext_hub_reset_gpio;
3144 u32 val;
Mayank Ranad339abe2017-05-31 09:19:49 -07003145 unsigned long irq_type;
Mayank Rana511f3b22016-08-02 12:00:11 -07003146
3147 mdwc = devm_kzalloc(&pdev->dev, sizeof(*mdwc), GFP_KERNEL);
3148 if (!mdwc)
3149 return -ENOMEM;
3150
Mayank Rana511f3b22016-08-02 12:00:11 -07003151 platform_set_drvdata(pdev, mdwc);
3152 mdwc->dev = &pdev->dev;
3153
3154 INIT_LIST_HEAD(&mdwc->req_complete_list);
3155 INIT_WORK(&mdwc->resume_work, dwc3_resume_work);
3156 INIT_WORK(&mdwc->restart_usb_work, dwc3_restart_usb_work);
Jack Pham4b8b4ae2016-08-09 11:36:34 -07003157 INIT_WORK(&mdwc->vbus_draw_work, dwc3_msm_vbus_draw_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07003158 INIT_DELAYED_WORK(&mdwc->sm_work, dwc3_otg_sm_work);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303159 INIT_DELAYED_WORK(&mdwc->perf_vote_work, msm_dwc3_perf_vote_work);
Hemant Kumar006fae42017-07-12 18:11:25 -07003160 INIT_DELAYED_WORK(&mdwc->sdp_check, check_for_sdp_connection);
Mayank Rana511f3b22016-08-02 12:00:11 -07003161
3162 mdwc->dwc3_wq = alloc_ordered_workqueue("dwc3_wq", 0);
3163 if (!mdwc->dwc3_wq) {
3164 pr_err("%s: Unable to create workqueue dwc3_wq\n", __func__);
3165 return -ENOMEM;
3166 }
3167
3168 /* Get all clks and gdsc reference */
3169 ret = dwc3_msm_get_clk_gdsc(mdwc);
3170 if (ret) {
3171 dev_err(&pdev->dev, "error getting clock or gdsc.\n");
Ziqi Chen0ea81162017-08-04 18:17:55 +08003172 goto err;
Mayank Rana511f3b22016-08-02 12:00:11 -07003173 }
3174
3175 mdwc->id_state = DWC3_ID_FLOAT;
3176 set_bit(ID, &mdwc->inputs);
3177
3178 mdwc->charging_disabled = of_property_read_bool(node,
3179 "qcom,charging-disabled");
3180
3181 ret = of_property_read_u32(node, "qcom,lpm-to-suspend-delay-ms",
3182 &mdwc->lpm_to_suspend_delay);
3183 if (ret) {
3184 dev_dbg(&pdev->dev, "setting lpm_to_suspend_delay to zero.\n");
3185 mdwc->lpm_to_suspend_delay = 0;
3186 }
3187
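	/*
	 * Wakeup interrupts are looked up by name from DT. Only
	 * "pwr_event_irq" is mandatory; the PHY wakeup interrupts
	 * (e.g. "ss_phy_irq" and the DP/DM/HS PHY interrupts) are
	 * optional and stay disabled (IRQ_NOAUTOEN) until needed.
	 * The exact set comes from usb_irq_info[] declared earlier
	 * in this file.
	 */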
Mayank Ranad339abe2017-05-31 09:19:49 -07003188 memcpy(mdwc->wakeup_irq, usb_irq_info, sizeof(usb_irq_info));
3189 for (i = 0; i < USB_MAX_IRQ; i++) {
3190 irq_type = IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME |
3191 IRQF_ONESHOT;
3192 mdwc->wakeup_irq[i].irq = platform_get_irq_byname(pdev,
3193 mdwc->wakeup_irq[i].name);
3194 if (mdwc->wakeup_irq[i].irq < 0) {
3195			/* pwr_event_irq is the only mandatory IRQ */
3196 if (!strcmp(mdwc->wakeup_irq[i].name,
3197 "pwr_event_irq")) {
3198				dev_err(&pdev->dev, "get_irq for %s failed\n",
3199 mdwc->wakeup_irq[i].name);
3200 ret = -EINVAL;
3201 goto err;
3202 }
3203 mdwc->wakeup_irq[i].irq = 0;
3204 } else {
3205 irq_set_status_flags(mdwc->wakeup_irq[i].irq,
3206 IRQ_NOAUTOEN);
3207			/* ss_phy_irq is a level-triggered interrupt */
3208 if (!strcmp(mdwc->wakeup_irq[i].name, "ss_phy_irq"))
3209 irq_type = IRQF_TRIGGER_HIGH | IRQF_ONESHOT |
3210 IRQ_TYPE_LEVEL_HIGH | IRQF_EARLY_RESUME;
Mayank Rana511f3b22016-08-02 12:00:11 -07003211
Mayank Ranad339abe2017-05-31 09:19:49 -07003212 ret = devm_request_threaded_irq(&pdev->dev,
3213 mdwc->wakeup_irq[i].irq,
Mayank Rana511f3b22016-08-02 12:00:11 -07003214 msm_dwc3_pwr_irq,
3215 msm_dwc3_pwr_irq_thread,
Mayank Ranad339abe2017-05-31 09:19:49 -07003216 irq_type,
3217 mdwc->wakeup_irq[i].name, mdwc);
3218 if (ret) {
3219				dev_err(&pdev->dev, "irq req %s failed: %d\n",
3220 mdwc->wakeup_irq[i].name, ret);
3221 goto err;
3222 }
Mayank Rana511f3b22016-08-02 12:00:11 -07003223 }
3224 }
3225
3226 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcsr_base");
3227 if (!res) {
3228 dev_dbg(&pdev->dev, "missing TCSR memory resource\n");
3229 } else {
3230 tcsr = devm_ioremap_nocache(&pdev->dev, res->start,
3231 resource_size(res));
3232 if (IS_ERR_OR_NULL(tcsr)) {
3233 dev_dbg(&pdev->dev, "tcsr ioremap failed\n");
3234 } else {
3235 /* Enable USB3 on the primary USB port. */
3236 writel_relaxed(0x1, tcsr);
3237 /*
3238 * Ensure that TCSR write is completed before
3239 * USB registers initialization.
3240 */
3241 mb();
3242 }
3243 }
3244
3245 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core_base");
3246 if (!res) {
3247 dev_err(&pdev->dev, "missing memory base resource\n");
3248 ret = -ENODEV;
3249 goto err;
3250 }
3251
3252 mdwc->base = devm_ioremap_nocache(&pdev->dev, res->start,
3253 resource_size(res));
3254 if (!mdwc->base) {
3255 dev_err(&pdev->dev, "ioremap failed\n");
3256 ret = -ENODEV;
3257 goto err;
3258 }
3259
3260 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3261 "ahb2phy_base");
3262 if (res) {
3263 mdwc->ahb2phy_base = devm_ioremap_nocache(&pdev->dev,
3264 res->start, resource_size(res));
3265 if (IS_ERR_OR_NULL(mdwc->ahb2phy_base)) {
3266 dev_err(dev, "couldn't find ahb2phy_base addr.\n");
3267 mdwc->ahb2phy_base = NULL;
3268 } else {
3269 /*
3270			 * On some targets cfg_ahb_clk depends upon the USB GDSC
3271			 * regulator. If cfg_ahb_clk is enabled without first
3272			 * turning on the USB GDSC, the clock remains stuck off.
3273 */
3274 dwc3_msm_config_gdsc(mdwc, 1);
3275 clk_prepare_enable(mdwc->cfg_ahb_clk);
3276			/* Configure AHB2PHY for one wait state read/write */
3277 val = readl_relaxed(mdwc->ahb2phy_base +
3278 PERIPH_SS_AHB2PHY_TOP_CFG);
3279 if (val != ONE_READ_WRITE_WAIT) {
3280 writel_relaxed(ONE_READ_WRITE_WAIT,
3281 mdwc->ahb2phy_base +
3282 PERIPH_SS_AHB2PHY_TOP_CFG);
3283 /* complete above write before using USB PHY */
3284 mb();
3285 }
3286 clk_disable_unprepare(mdwc->cfg_ahb_clk);
3287 dwc3_msm_config_gdsc(mdwc, 0);
3288 }
3289 }
3290
3291 if (of_get_property(pdev->dev.of_node, "qcom,usb-dbm", NULL)) {
3292 mdwc->dbm = usb_get_dbm_by_phandle(&pdev->dev, "qcom,usb-dbm");
3293 if (IS_ERR(mdwc->dbm)) {
3294 dev_err(&pdev->dev, "unable to get dbm device\n");
3295 ret = -EPROBE_DEFER;
3296 goto err;
3297 }
3298 /*
3299		 * The power event IRQ is required if the DBM signals L1 exit
3300		 * via an interrupt.
3301 */
3302 if (dbm_l1_lpm_interrupt(mdwc->dbm)) {
Mayank Ranad339abe2017-05-31 09:19:49 -07003303 if (!mdwc->wakeup_irq[PWR_EVNT_IRQ].irq) {
Mayank Rana511f3b22016-08-02 12:00:11 -07003304 dev_err(&pdev->dev,
3305 "need pwr_event_irq exiting L1\n");
3306 ret = -EINVAL;
3307 goto err;
3308 }
3309 }
3310 }
3311
3312 ext_hub_reset_gpio = of_get_named_gpio(node,
3313 "qcom,ext-hub-reset-gpio", 0);
3314
3315 if (gpio_is_valid(ext_hub_reset_gpio)
3316 && (!devm_gpio_request(&pdev->dev, ext_hub_reset_gpio,
3317 "qcom,ext-hub-reset-gpio"))) {
3318 /* reset external hub */
3319 gpio_direction_output(ext_hub_reset_gpio, 1);
3320 /*
3321		 * Hub reset should be asserted for a minimum of 5 microseconds
3322 * before deasserting.
3323 */
3324 usleep_range(5, 1000);
3325 gpio_direction_output(ext_hub_reset_gpio, 0);
3326 }
3327
3328 if (of_property_read_u32(node, "qcom,dwc-usb3-msm-tx-fifo-size",
3329 &mdwc->tx_fifo_size))
3330 dev_err(&pdev->dev,
3331 "unable to read platform data tx fifo size\n");
3332
3333 mdwc->disable_host_mode_pm = of_property_read_bool(node,
3334 "qcom,disable-host-mode-pm");
Mayank Ranad339abe2017-05-31 09:19:49 -07003335 mdwc->use_pdc_interrupts = of_property_read_bool(node,
3336 "qcom,use-pdc-interrupts");
Mayank Rana511f3b22016-08-02 12:00:11 -07003337 dwc3_set_notifier(&dwc3_msm_notify_event);
3338
Jack Phambbe27962017-03-23 18:42:26 -07003339 ret = dwc3_msm_init_iommu(mdwc);
3340 if (ret)
3341 goto err;
3342
Mayank Rana42dfac42017-10-03 15:01:03 -07003343 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
3344 dev_err(&pdev->dev, "setting DMA mask to 64 failed.\n");
3345 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
3346 dev_err(&pdev->dev, "setting DMA mask to 32 failed.\n");
3347 ret = -EOPNOTSUPP;
3348 goto uninit_iommu;
3349 }
3350 }
3351
Mayank Rana511f3b22016-08-02 12:00:11 -07003352 /* Assumes dwc3 is the first DT child of dwc3-msm */
3353 dwc3_node = of_get_next_available_child(node, NULL);
3354 if (!dwc3_node) {
3355 dev_err(&pdev->dev, "failed to find dwc3 child\n");
3356 ret = -ENODEV;
Jack Phambbe27962017-03-23 18:42:26 -07003357 goto uninit_iommu;
Mayank Rana511f3b22016-08-02 12:00:11 -07003358 }
3359
3360 ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
3361 if (ret) {
3362 dev_err(&pdev->dev,
3363				"failed to create dwc3 core\n");
3364 of_node_put(dwc3_node);
Jack Phambbe27962017-03-23 18:42:26 -07003365 goto uninit_iommu;
Mayank Rana511f3b22016-08-02 12:00:11 -07003366 }
3367
3368 mdwc->dwc3 = of_find_device_by_node(dwc3_node);
3369 of_node_put(dwc3_node);
3370 if (!mdwc->dwc3) {
3371		dev_err(&pdev->dev, "failed to get dwc3 platform device\n");
		ret = -ENODEV;
3372		goto put_dwc3;
3373 }
3374
3375 mdwc->hs_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
3376 "usb-phy", 0);
3377 if (IS_ERR(mdwc->hs_phy)) {
3378 dev_err(&pdev->dev, "unable to get hsphy device\n");
3379 ret = PTR_ERR(mdwc->hs_phy);
3380 goto put_dwc3;
3381 }
3382 mdwc->ss_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
3383 "usb-phy", 1);
3384 if (IS_ERR(mdwc->ss_phy)) {
3385 dev_err(&pdev->dev, "unable to get ssphy device\n");
3386 ret = PTR_ERR(mdwc->ss_phy);
3387 goto put_dwc3;
3388 }
3389
3390 mdwc->bus_scale_table = msm_bus_cl_get_pdata(pdev);
3391 if (mdwc->bus_scale_table) {
3392 mdwc->bus_perf_client =
3393 msm_bus_scale_register_client(mdwc->bus_scale_table);
3394 }
3395
3396 dwc = platform_get_drvdata(mdwc->dwc3);
3397 if (!dwc) {
3398		dev_err(&pdev->dev, "Failed to get dwc3 device\n");
		ret = -ENODEV;
3399		goto put_dwc3;
3400 }
3401
3402 mdwc->irq_to_affin = platform_get_irq(mdwc->dwc3, 0);
3403 mdwc->dwc3_cpu_notifier.notifier_call = dwc3_cpu_notifier_cb;
3404
3405 if (cpu_to_affin)
3406 register_cpu_notifier(&mdwc->dwc3_cpu_notifier);
3407
Mayank Ranaf4918d32016-12-15 13:35:55 -08003408 ret = of_property_read_u32(node, "qcom,num-gsi-evt-buffs",
3409 &mdwc->num_gsi_event_buffers);
3410
Jack Pham9faa51df2017-04-03 18:13:40 -07003411 /* IOMMU will be reattached upon each resume/connect */
3412 if (mdwc->iommu_map)
3413 arm_iommu_detach_device(mdwc->dev);
3414
Mayank Rana511f3b22016-08-02 12:00:11 -07003415 /*
3416 * Clocks and regulators will not be turned on until the first time
3417 * runtime PM resume is called. This is to allow for booting up with
3418 * charger already connected so as not to disturb PHY line states.
3419 */
3420 mdwc->lpm_flags = MDWC3_POWER_COLLAPSE | MDWC3_SS_PHY_SUSPEND;
3421 atomic_set(&dwc->in_lpm, 1);
Mayank Rana511f3b22016-08-02 12:00:11 -07003422 pm_runtime_set_autosuspend_delay(mdwc->dev, 1000);
3423 pm_runtime_use_autosuspend(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003424 device_init_wakeup(mdwc->dev, 1);
3425
3426 if (of_property_read_bool(node, "qcom,disable-dev-mode-pm"))
3427 pm_runtime_get_noresume(mdwc->dev);
3428
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303429 ret = of_property_read_u32(node, "qcom,pm-qos-latency",
3430 &mdwc->pm_qos_latency);
3431 if (ret) {
3432 dev_dbg(&pdev->dev, "setting pm-qos-latency to zero.\n");
3433 mdwc->pm_qos_latency = 0;
3434 }
3435
Pratham Pratapd76a1782017-11-14 20:50:31 +05303436 mdwc->no_vbus_vote_type_c = of_property_read_bool(node,
3437 "qcom,no-vbus-vote-with-type-C");
3438
3439 /* Mark type-C as true by default */
3440 mdwc->type_c = true;
3441
Hemant Kumar8220a982017-01-19 18:11:34 -08003442 mdwc->usb_psy = power_supply_get_by_name("usb");
3443 if (!mdwc->usb_psy) {
3444 dev_warn(mdwc->dev, "Could not get usb power_supply\n");
3445 pval.intval = -EINVAL;
3446 } else {
3447 power_supply_get_property(mdwc->usb_psy,
Pratham Pratapd76a1782017-11-14 20:50:31 +05303448 POWER_SUPPLY_PROP_CONNECTOR_TYPE, &pval);
3449 if (pval.intval == POWER_SUPPLY_CONNECTOR_MICRO_USB)
3450 mdwc->type_c = false;
3451 power_supply_get_property(mdwc->usb_psy,
Hemant Kumar8220a982017-01-19 18:11:34 -08003452 POWER_SUPPLY_PROP_PRESENT, &pval);
3453 }
3454
Pratham Pratapd76a1782017-11-14 20:50:31 +05303455 /*
3456 * Extcon phandles starting indices in DT:
3457 * type-C : 0
3458 * microUSB : 3
3459 */
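	/*
	 * dwc3_msm_extcon_register() below is therefore passed the
	 * starting index into the "extcon" phandle list: 0 for type-C
	 * connectors, 3 for microUSB connectors.
	 */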
3460 ret = dwc3_msm_extcon_register(mdwc, mdwc->type_c ? 0 : 3);
3461 if (ret)
3462 goto put_psy;
3463
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +05303464 mutex_init(&mdwc->suspend_resume_mutex);
Mayank Rana511f3b22016-08-02 12:00:11 -07003465 /* Update initial VBUS/ID state from extcon */
Jack Pham4e9dff72017-04-04 18:05:53 -07003466 if (mdwc->extcon_vbus && extcon_get_state(mdwc->extcon_vbus,
Mayank Rana511f3b22016-08-02 12:00:11 -07003467 EXTCON_USB))
3468 dwc3_msm_vbus_notifier(&mdwc->vbus_nb, true, mdwc->extcon_vbus);
Jack Pham4e9dff72017-04-04 18:05:53 -07003469 else if (mdwc->extcon_id && extcon_get_state(mdwc->extcon_id,
Mayank Rana511f3b22016-08-02 12:00:11 -07003470 EXTCON_USB_HOST))
3471 dwc3_msm_id_notifier(&mdwc->id_nb, true, mdwc->extcon_id);
Hemant Kumar8220a982017-01-19 18:11:34 -08003472 else if (!pval.intval) {
3473 /* USB cable is not connected */
3474 schedule_delayed_work(&mdwc->sm_work, 0);
3475 } else {
3476 if (pval.intval > 0)
3477 dev_info(mdwc->dev, "charger detection in progress\n");
3478 }
Mayank Rana511f3b22016-08-02 12:00:11 -07003479
3480 device_create_file(&pdev->dev, &dev_attr_mode);
Vamsi Krishna Samavedam17f26db2017-01-31 17:21:23 -08003481 device_create_file(&pdev->dev, &dev_attr_speed);
Mayank Rana511f3b22016-08-02 12:00:11 -07003482
Mayank Rana511f3b22016-08-02 12:00:11 -07003483 host_mode = usb_get_dr_mode(&mdwc->dwc3->dev) == USB_DR_MODE_HOST;
3484 if (!dwc->is_drd && host_mode) {
3485 dev_dbg(&pdev->dev, "DWC3 in host only mode\n");
3486 mdwc->id_state = DWC3_ID_GROUND;
3487 dwc3_ext_event_notify(mdwc);
3488 }
3489
3490 return 0;
3491
Pratham Pratapd76a1782017-11-14 20:50:31 +05303492put_psy:
3493 if (mdwc->usb_psy)
3494 power_supply_put(mdwc->usb_psy);
3495
3496 if (cpu_to_affin)
3497 unregister_cpu_notifier(&mdwc->dwc3_cpu_notifier);
Mayank Rana511f3b22016-08-02 12:00:11 -07003498put_dwc3:
Mayank Rana511f3b22016-08-02 12:00:11 -07003499 if (mdwc->bus_perf_client)
3500 msm_bus_scale_unregister_client(mdwc->bus_perf_client);
Ziqi Chen0ea81162017-08-04 18:17:55 +08003501
Jack Phambbe27962017-03-23 18:42:26 -07003502uninit_iommu:
Jack Pham9faa51df2017-04-03 18:13:40 -07003503 if (mdwc->iommu_map) {
3504 arm_iommu_detach_device(mdwc->dev);
Jack Phambbe27962017-03-23 18:42:26 -07003505 arm_iommu_release_mapping(mdwc->iommu_map);
Jack Pham9faa51df2017-04-03 18:13:40 -07003506 }
Ziqi Chen0ea81162017-08-04 18:17:55 +08003507 of_platform_depopulate(&pdev->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003508err:
Ziqi Chen0ea81162017-08-04 18:17:55 +08003509 destroy_workqueue(mdwc->dwc3_wq);
Mayank Rana511f3b22016-08-02 12:00:11 -07003510 return ret;
3511}
3512
Mayank Rana511f3b22016-08-02 12:00:11 -07003513static int dwc3_msm_remove(struct platform_device *pdev)
3514{
3515 struct dwc3_msm *mdwc = platform_get_drvdata(pdev);
Mayank Rana08e41922017-03-02 15:25:48 -08003516 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07003517 int ret_pm;
3518
3519 device_remove_file(&pdev->dev, &dev_attr_mode);
Pratham Pratapd76a1782017-11-14 20:50:31 +05303520 if (mdwc->usb_psy)
3521 power_supply_put(mdwc->usb_psy);
Mayank Rana511f3b22016-08-02 12:00:11 -07003522
3523 if (cpu_to_affin)
3524 unregister_cpu_notifier(&mdwc->dwc3_cpu_notifier);
3525
3526 /*
3527 * In case of system suspend, pm_runtime_get_sync fails.
3528 * Hence turn ON the clocks manually.
3529 */
3530 ret_pm = pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003531 dbg_event(0xFF, "Remov gsyn", ret_pm);
Mayank Rana511f3b22016-08-02 12:00:11 -07003532 if (ret_pm < 0) {
3533 dev_err(mdwc->dev,
3534 "pm_runtime_get_sync failed with %d\n", ret_pm);
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05303535 if (mdwc->noc_aggr_clk)
3536 clk_prepare_enable(mdwc->noc_aggr_clk);
Mayank Rana511f3b22016-08-02 12:00:11 -07003537 clk_prepare_enable(mdwc->utmi_clk);
3538 clk_prepare_enable(mdwc->core_clk);
3539 clk_prepare_enable(mdwc->iface_clk);
3540 clk_prepare_enable(mdwc->sleep_clk);
3541 if (mdwc->bus_aggr_clk)
3542 clk_prepare_enable(mdwc->bus_aggr_clk);
3543 clk_prepare_enable(mdwc->xo_clk);
3544 }
3545
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303546 cancel_delayed_work_sync(&mdwc->perf_vote_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07003547 cancel_delayed_work_sync(&mdwc->sm_work);
3548
3549 if (mdwc->hs_phy)
3550 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
Ziqi Chen0ea81162017-08-04 18:17:55 +08003551 of_platform_depopulate(&pdev->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003552
Mayank Rana08e41922017-03-02 15:25:48 -08003553 dbg_event(0xFF, "Remov put", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003554 pm_runtime_disable(mdwc->dev);
3555 pm_runtime_barrier(mdwc->dev);
3556 pm_runtime_put_sync(mdwc->dev);
3557 pm_runtime_set_suspended(mdwc->dev);
3558 device_wakeup_disable(mdwc->dev);
3559
3560 if (mdwc->bus_perf_client)
3561 msm_bus_scale_unregister_client(mdwc->bus_perf_client);
3562
3563 if (!IS_ERR_OR_NULL(mdwc->vbus_reg))
3564 regulator_disable(mdwc->vbus_reg);
3565
Mayank Ranad339abe2017-05-31 09:19:49 -07003566 if (mdwc->wakeup_irq[HS_PHY_IRQ].irq)
3567 disable_irq(mdwc->wakeup_irq[HS_PHY_IRQ].irq);
3568 if (mdwc->wakeup_irq[DP_HS_PHY_IRQ].irq)
3569 disable_irq(mdwc->wakeup_irq[DP_HS_PHY_IRQ].irq);
3570 if (mdwc->wakeup_irq[DM_HS_PHY_IRQ].irq)
3571 disable_irq(mdwc->wakeup_irq[DM_HS_PHY_IRQ].irq);
3572 if (mdwc->wakeup_irq[SS_PHY_IRQ].irq)
3573 disable_irq(mdwc->wakeup_irq[SS_PHY_IRQ].irq);
3574 disable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
Mayank Rana511f3b22016-08-02 12:00:11 -07003575
3576 clk_disable_unprepare(mdwc->utmi_clk);
3577 clk_set_rate(mdwc->core_clk, 19200000);
3578 clk_disable_unprepare(mdwc->core_clk);
3579 clk_disable_unprepare(mdwc->iface_clk);
3580 clk_disable_unprepare(mdwc->sleep_clk);
3581 clk_disable_unprepare(mdwc->xo_clk);
3582 clk_put(mdwc->xo_clk);
3583
3584 dwc3_msm_config_gdsc(mdwc, 0);
3585
Jack Phambbe27962017-03-23 18:42:26 -07003586 if (mdwc->iommu_map) {
3587 if (!atomic_read(&dwc->in_lpm))
3588 arm_iommu_detach_device(mdwc->dev);
3589 arm_iommu_release_mapping(mdwc->iommu_map);
3590 }
3591
Mayank Rana511f3b22016-08-02 12:00:11 -07003592 return 0;
3593}
3594
Jack Pham4d4e9342016-12-07 19:25:02 -08003595static int dwc3_msm_host_notifier(struct notifier_block *nb,
3596 unsigned long event, void *ptr)
3597{
3598 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, host_nb);
3599 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3600 struct usb_device *udev = ptr;
3601 union power_supply_propval pval;
3602 unsigned int max_power;
3603
3604 if (event != USB_DEVICE_ADD && event != USB_DEVICE_REMOVE)
3605 return NOTIFY_DONE;
3606
3607 if (!mdwc->usb_psy) {
3608 mdwc->usb_psy = power_supply_get_by_name("usb");
3609 if (!mdwc->usb_psy)
3610 return NOTIFY_DONE;
3611 }
3612
3613 /*
3614 * For direct-attach devices, new udev is direct child of root hub
3615 * i.e. dwc -> xhci -> root_hub -> udev
3616 * root_hub's udev->parent==NULL, so traverse struct device hierarchy
3617 */
3618 if (udev->parent && !udev->parent->parent &&
3619 udev->dev.parent->parent == &dwc->xhci->dev) {
3620 if (event == USB_DEVICE_ADD && udev->actconfig) {
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08003621 if (!dwc3_msm_is_ss_rhport_connected(mdwc)) {
3622 /*
3623 * Core clock rate can be reduced only if root
3624 * hub SS port is not enabled/connected.
3625 */
3626 clk_set_rate(mdwc->core_clk,
3627 mdwc->core_clk_rate_hs);
3628 dev_dbg(mdwc->dev,
3629 "set hs core clk rate %ld\n",
3630 mdwc->core_clk_rate_hs);
3631 mdwc->max_rh_port_speed = USB_SPEED_HIGH;
3632 } else {
3633 mdwc->max_rh_port_speed = USB_SPEED_SUPER;
3634 }
3635
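			/*
			 * Per the USB specifications, bMaxPower is
			 * expressed in units of 8 mA for SuperSpeed
			 * devices and 2 mA otherwise, hence the
			 * multipliers below.
			 */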
Jack Pham4d4e9342016-12-07 19:25:02 -08003636 if (udev->speed >= USB_SPEED_SUPER)
3637 max_power = udev->actconfig->desc.bMaxPower * 8;
3638 else
3639 max_power = udev->actconfig->desc.bMaxPower * 2;
3640 dev_dbg(mdwc->dev, "%s configured bMaxPower:%d (mA)\n",
3641 dev_name(&udev->dev), max_power);
3642
3643 /* inform PMIC of max power so it can optimize boost */
3644 pval.intval = max_power * 1000;
3645 power_supply_set_property(mdwc->usb_psy,
3646 POWER_SUPPLY_PROP_BOOST_CURRENT, &pval);
3647 } else {
3648 pval.intval = 0;
3649 power_supply_set_property(mdwc->usb_psy,
3650 POWER_SUPPLY_PROP_BOOST_CURRENT, &pval);
Hemant Kumar6f504dc2017-02-07 14:13:58 -08003651
3652 /* set rate back to default core clk rate */
3653 clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
3654 dev_dbg(mdwc->dev, "set core clk rate %ld\n",
3655 mdwc->core_clk_rate);
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08003656 mdwc->max_rh_port_speed = USB_SPEED_UNKNOWN;
Jack Pham4d4e9342016-12-07 19:25:02 -08003657 }
3658 }
3659
3660 return NOTIFY_DONE;
3661}
3662
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303663static void msm_dwc3_perf_vote_update(struct dwc3_msm *mdwc, bool perf_mode)
3664{
3665 static bool curr_perf_mode;
3666 int latency = mdwc->pm_qos_latency;
3667
3668 if ((curr_perf_mode == perf_mode) || !latency)
3669 return;
3670
3671 if (perf_mode)
3672 pm_qos_update_request(&mdwc->pm_qos_req_dma, latency);
3673 else
3674 pm_qos_update_request(&mdwc->pm_qos_req_dma,
3675 PM_QOS_DEFAULT_VALUE);
3676
3677 curr_perf_mode = perf_mode;
3678 pr_debug("%s: latency updated to: %d\n", __func__,
3679 perf_mode ? latency : PM_QOS_DEFAULT_VALUE);
3680}
3681
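/*
 * Sampled once per PM_QOS_SAMPLE_SEC: if at least PM_QOS_THRESHOLD
 * controller interrupts arrived since the previous sample, vote for the
 * low pm_qos_latency value, otherwise fall back to PM_QOS_DEFAULT_VALUE.
 * A zero qcom,pm-qos-latency disables the vote entirely.
 */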
3682static void msm_dwc3_perf_vote_work(struct work_struct *w)
3683{
3684 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
3685 perf_vote_work.work);
3686 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3687 static unsigned long last_irq_cnt;
3688 bool in_perf_mode = false;
3689
3690 if (dwc->irq_cnt - last_irq_cnt >= PM_QOS_THRESHOLD)
3691 in_perf_mode = true;
3692
3693 pr_debug("%s: in_perf_mode:%u, interrupts in last sample:%lu\n",
3694 __func__, in_perf_mode, (dwc->irq_cnt - last_irq_cnt));
3695
3696 last_irq_cnt = dwc->irq_cnt;
3697 msm_dwc3_perf_vote_update(mdwc, in_perf_mode);
3698 schedule_delayed_work(&mdwc->perf_vote_work,
3699 msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
3700}
3701
Mayank Rana511f3b22016-08-02 12:00:11 -07003702#define VBUS_REG_CHECK_DELAY (msecs_to_jiffies(1000))
3703
3704/**
3705 * dwc3_otg_start_host - helper function for starting/stopping the host
3706 * controller driver.
3707 *
3708 * @mdwc: Pointer to the dwc3_msm structure.
3709 * @on: start / stop the host controller driver.
3710 *
3711 * Returns 0 on success otherwise negative errno.
3712 */
3713static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
3714{
3715 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3716 int ret = 0;
3717
Mayank Rana511f3b22016-08-02 12:00:11 -07003718 /*
3719 * The vbus_reg pointer could have multiple values
3720 * NULL: regulator_get() hasn't been called, or was previously deferred
3721 * IS_ERR: regulator could not be obtained, so skip using it
3722 * Valid pointer otherwise
3723 */
Pratham Pratapd76a1782017-11-14 20:50:31 +05303724 if (!mdwc->vbus_reg && (!mdwc->type_c ||
3725 (mdwc->type_c && !mdwc->no_vbus_vote_type_c))) {
Mayank Rana511f3b22016-08-02 12:00:11 -07003726 mdwc->vbus_reg = devm_regulator_get_optional(mdwc->dev,
3727 "vbus_dwc3");
3728 if (IS_ERR(mdwc->vbus_reg) &&
3729 PTR_ERR(mdwc->vbus_reg) == -EPROBE_DEFER) {
3730 /* regulators may not be ready, so retry again later */
3731 mdwc->vbus_reg = NULL;
3732 return -EPROBE_DEFER;
3733 }
3734 }
3735
3736 if (on) {
3737 dev_dbg(mdwc->dev, "%s: turn on host\n", __func__);
3738
Mayank Rana511f3b22016-08-02 12:00:11 -07003739 mdwc->hs_phy->flags |= PHY_HOST_MODE;
Mayank Rana0d5efd72017-06-08 10:06:00 -07003740 if (dwc->maximum_speed == USB_SPEED_SUPER) {
Hemant Kumarde1df692016-04-26 19:36:48 -07003741 mdwc->ss_phy->flags |= PHY_HOST_MODE;
Mayank Rana0d5efd72017-06-08 10:06:00 -07003742 usb_phy_notify_connect(mdwc->ss_phy,
3743 USB_SPEED_SUPER);
3744 }
Hemant Kumarde1df692016-04-26 19:36:48 -07003745
Mayank Rana0d5efd72017-06-08 10:06:00 -07003746 usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
Hemant Kumarde1df692016-04-26 19:36:48 -07003747 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003748 dbg_event(0xFF, "StrtHost gync",
3749 atomic_read(&mdwc->dev->power.usage_count));
Pratham Pratapd76a1782017-11-14 20:50:31 +05303750 if (!IS_ERR_OR_NULL(mdwc->vbus_reg))
Mayank Rana511f3b22016-08-02 12:00:11 -07003751 ret = regulator_enable(mdwc->vbus_reg);
3752 if (ret) {
3753 dev_err(mdwc->dev, "unable to enable vbus_reg\n");
3754 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3755 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
3756 pm_runtime_put_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003757 dbg_event(0xFF, "vregerr psync",
3758 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003759 return ret;
3760 }
3761
3762 dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
3763
Jack Pham4d4e9342016-12-07 19:25:02 -08003764 mdwc->host_nb.notifier_call = dwc3_msm_host_notifier;
3765 usb_register_notify(&mdwc->host_nb);
3766
Manu Gautam976fdfc2016-08-18 09:27:35 +05303767 mdwc->usbdev_nb.notifier_call = msm_dwc3_usbdev_notify;
3768 usb_register_atomic_notify(&mdwc->usbdev_nb);
Mayank Ranaa75caa52017-10-10 11:45:13 -07003769 ret = dwc3_host_init(dwc);
Mayank Rana511f3b22016-08-02 12:00:11 -07003770 if (ret) {
3771 dev_err(mdwc->dev,
3772 "%s: failed to add XHCI pdev ret=%d\n",
3773 __func__, ret);
Pratham Pratapd76a1782017-11-14 20:50:31 +05303774 if (!IS_ERR_OR_NULL(mdwc->vbus_reg))
Mayank Rana511f3b22016-08-02 12:00:11 -07003775 regulator_disable(mdwc->vbus_reg);
3776 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3777 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
3778 pm_runtime_put_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003779 dbg_event(0xFF, "pdeverr psync",
3780 atomic_read(&mdwc->dev->power.usage_count));
Jack Pham4d4e9342016-12-07 19:25:02 -08003781 usb_unregister_notify(&mdwc->host_nb);
Mayank Rana511f3b22016-08-02 12:00:11 -07003782 return ret;
3783 }
3784
3785 /*
3786		 * In some cases the USB PHY does not enter suspend as part of
3787		 * host mode suspend. Hence disable XHCI's runtime PM here if
3788		 * disable_host_mode_pm is set.
3789 */
3790 if (mdwc->disable_host_mode_pm)
3791 pm_runtime_disable(&dwc->xhci->dev);
3792
3793 mdwc->in_host_mode = true;
3794 dwc3_usb3_phy_suspend(dwc, true);
3795
3796 /* xHCI should have incremented child count as necessary */
Mayank Rana08e41922017-03-02 15:25:48 -08003797 dbg_event(0xFF, "StrtHost psync",
3798 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003799 pm_runtime_mark_last_busy(mdwc->dev);
3800 pm_runtime_put_sync_autosuspend(mdwc->dev);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303801#ifdef CONFIG_SMP
3802 mdwc->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
3803 mdwc->pm_qos_req_dma.irq = dwc->irq;
3804#endif
3805 pm_qos_add_request(&mdwc->pm_qos_req_dma,
3806 PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
3807 /* start in perf mode for better performance initially */
3808 msm_dwc3_perf_vote_update(mdwc, true);
3809 schedule_delayed_work(&mdwc->perf_vote_work,
3810 msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
Mayank Rana511f3b22016-08-02 12:00:11 -07003811 } else {
3812 dev_dbg(mdwc->dev, "%s: turn off host\n", __func__);
3813
Manu Gautam976fdfc2016-08-18 09:27:35 +05303814 usb_unregister_atomic_notify(&mdwc->usbdev_nb);
Pratham Pratapd76a1782017-11-14 20:50:31 +05303815 if (!IS_ERR_OR_NULL(mdwc->vbus_reg))
Mayank Rana511f3b22016-08-02 12:00:11 -07003816 ret = regulator_disable(mdwc->vbus_reg);
3817 if (ret) {
3818 dev_err(mdwc->dev, "unable to disable vbus_reg\n");
3819 return ret;
3820 }
3821
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303822 cancel_delayed_work_sync(&mdwc->perf_vote_work);
3823 msm_dwc3_perf_vote_update(mdwc, false);
3824 pm_qos_remove_request(&mdwc->pm_qos_req_dma);
3825
Mayank Rana511f3b22016-08-02 12:00:11 -07003826 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003827 dbg_event(0xFF, "StopHost gsync",
3828 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003829 usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
Mayank Rana0d5efd72017-06-08 10:06:00 -07003830 if (mdwc->ss_phy->flags & PHY_HOST_MODE) {
3831 usb_phy_notify_disconnect(mdwc->ss_phy,
3832 USB_SPEED_SUPER);
3833 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
3834 }
3835
Mayank Rana511f3b22016-08-02 12:00:11 -07003836 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
Mayank Ranaa75caa52017-10-10 11:45:13 -07003837 dwc3_host_exit(dwc);
Jack Pham4d4e9342016-12-07 19:25:02 -08003838 usb_unregister_notify(&mdwc->host_nb);
Mayank Rana511f3b22016-08-02 12:00:11 -07003839
Mayank Rana511f3b22016-08-02 12:00:11 -07003840 dwc3_usb3_phy_suspend(dwc, false);
Mayank Rana511f3b22016-08-02 12:00:11 -07003841 mdwc->in_host_mode = false;
3842
Mayank Ranaa1d094c2017-11-03 10:40:10 -07003843 pm_runtime_put_sync_suspend(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003844 dbg_event(0xFF, "StopHost psync",
3845 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003846 }
3847
3848 return 0;
3849}
3850
3851static void dwc3_override_vbus_status(struct dwc3_msm *mdwc, bool vbus_present)
3852{
3853 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3854
3855 /* Update OTG VBUS Valid from HSPHY to controller */
3856 dwc3_msm_write_readback(mdwc->base, HS_PHY_CTRL_REG,
3857 vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL :
3858 UTMI_OTG_VBUS_VALID,
3859 vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL : 0);
3860
3861 /* Update only if Super Speed is supported */
3862 if (dwc->maximum_speed == USB_SPEED_SUPER) {
3863 /* Update VBUS Valid from SSPHY to controller */
3864 dwc3_msm_write_readback(mdwc->base, SS_PHY_CTRL_REG,
3865 LANE0_PWR_PRESENT,
3866 vbus_present ? LANE0_PWR_PRESENT : 0);
3867 }
3868}
3869
3870/**
3871 * dwc3_otg_start_peripheral - bind/unbind the peripheral controller.
3872 *
3873 * @mdwc: Pointer to the dwc3_msm structure.
3874 * @on: Turn ON/OFF the gadget.
3875 *
3876 * Returns 0 on success otherwise negative errno.
3877 */
3878static int dwc3_otg_start_peripheral(struct dwc3_msm *mdwc, int on)
3879{
3880 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3881
3882 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003883 dbg_event(0xFF, "StrtGdgt gsync",
3884 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003885
3886 if (on) {
3887 dev_dbg(mdwc->dev, "%s: turn on gadget %s\n",
3888 __func__, dwc->gadget.name);
3889
3890 dwc3_override_vbus_status(mdwc, true);
3891 usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
3892 usb_phy_notify_connect(mdwc->ss_phy, USB_SPEED_SUPER);
3893
3894 /*
3895 * Core reset is not required during start peripheral. Only
3896 * DBM reset is required, hence perform only DBM reset here.
3897 */
3898 dwc3_msm_block_reset(mdwc, false);
3899
3900 dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
3901 usb_gadget_vbus_connect(&dwc->gadget);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303902#ifdef CONFIG_SMP
3903 mdwc->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
3904 mdwc->pm_qos_req_dma.irq = dwc->irq;
3905#endif
3906 pm_qos_add_request(&mdwc->pm_qos_req_dma,
3907 PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
3908 /* start in perf mode for better performance initially */
3909 msm_dwc3_perf_vote_update(mdwc, true);
3910 schedule_delayed_work(&mdwc->perf_vote_work,
3911 msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
Mayank Rana511f3b22016-08-02 12:00:11 -07003912 } else {
3913 dev_dbg(mdwc->dev, "%s: turn off gadget %s\n",
3914 __func__, dwc->gadget.name);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303915 cancel_delayed_work_sync(&mdwc->perf_vote_work);
3916 msm_dwc3_perf_vote_update(mdwc, false);
3917 pm_qos_remove_request(&mdwc->pm_qos_req_dma);
3918
Mayank Rana511f3b22016-08-02 12:00:11 -07003919 usb_gadget_vbus_disconnect(&dwc->gadget);
3920 usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
3921 usb_phy_notify_disconnect(mdwc->ss_phy, USB_SPEED_SUPER);
3922 dwc3_override_vbus_status(mdwc, false);
3923 dwc3_usb3_phy_suspend(dwc, false);
3924 }
3925
3926 pm_runtime_put_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003927 dbg_event(0xFF, "StopGdgt psync",
3928 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003929
3930 return 0;
3931}
3932
Mayank Rana54d60432017-07-18 12:10:04 -07003933/* speed: 0 - USB_SPEED_HIGH, 1 - USB_SPEED_SUPER */
3934static int dwc3_restart_usb_host_mode(struct notifier_block *nb,
3935 unsigned long event, void *ptr)
3936{
3937 struct dwc3_msm *mdwc;
3938 struct dwc3 *dwc;
3939 int ret = -EINVAL, usb_speed;
3940
3941 mdwc = container_of(nb, struct dwc3_msm, host_restart_nb);
3942 dwc = platform_get_drvdata(mdwc->dwc3);
3943
3944 usb_speed = (event == 0 ? USB_SPEED_HIGH : USB_SPEED_SUPER);
3945 if (dwc->maximum_speed == usb_speed)
3946 goto err;
3947
Mayank Rana8a5cba82017-10-27 15:12:54 -07003948 dbg_event(0xFF, "fw_restarthost", 0);
3949 flush_delayed_work(&mdwc->sm_work);
Mayank Rana54d60432017-07-18 12:10:04 -07003950 dbg_event(0xFF, "stop_host_mode", dwc->maximum_speed);
3951 ret = dwc3_otg_start_host(mdwc, 0);
3952 if (ret)
3953 goto err;
3954
Vijayavardhan Vennapusa2ba9b802017-12-08 10:46:44 +05303955 dbg_event(0xFF, "USB_lpm_state", atomic_read(&dwc->in_lpm));
Mayank Rana54d60432017-07-18 12:10:04 -07003956 /*
3957	 * Stopping host mode triggers autosuspend of the mdwc device,
3958	 * which may take some time to reach PM runtime suspend. Hence
3959	 * call pm_runtime_suspend() here to put the USB controller and
3960	 * PHYs into suspend immediately.
3961 */
3962 ret = pm_runtime_suspend(mdwc->dev);
Vijayavardhan Vennapusa2ba9b802017-12-08 10:46:44 +05303963 /*
3964	 * If the mdwc device is already suspended, pm_runtime_suspend()
3965	 * returns 1, which is not an error. Overwrite it with zero here.
3966 */
3967 if (ret > 0)
3968 ret = 0;
Mayank Rana54d60432017-07-18 12:10:04 -07003969 dbg_event(0xFF, "pm_runtime_sus", ret);
3970
3971 dwc->maximum_speed = usb_speed;
3972 mdwc->otg_state = OTG_STATE_B_IDLE;
3973 schedule_delayed_work(&mdwc->sm_work, 0);
3974 dbg_event(0xFF, "complete_host_change", dwc->maximum_speed);
3975err:
3976 return ret;
3977}
3978
Hemant Kumar006fae42017-07-12 18:11:25 -07003979static int get_psy_type(struct dwc3_msm *mdwc)
Mayank Rana511f3b22016-08-02 12:00:11 -07003980{
Jack Pham8caff352016-08-19 16:33:55 -07003981 union power_supply_propval pval = {0};
Mayank Rana511f3b22016-08-02 12:00:11 -07003982
3983 if (mdwc->charging_disabled)
Hemant Kumar006fae42017-07-12 18:11:25 -07003984 return -EINVAL;
Mayank Rana511f3b22016-08-02 12:00:11 -07003985
3986 if (!mdwc->usb_psy) {
3987 mdwc->usb_psy = power_supply_get_by_name("usb");
3988 if (!mdwc->usb_psy) {
Hemant Kumar006fae42017-07-12 18:11:25 -07003989 dev_err(mdwc->dev, "Could not get usb psy\n");
Mayank Rana511f3b22016-08-02 12:00:11 -07003990 return -ENODEV;
3991 }
3992 }
3993
Hemant Kumar006fae42017-07-12 18:11:25 -07003994 power_supply_get_property(mdwc->usb_psy, POWER_SUPPLY_PROP_REAL_TYPE,
3995 &pval);
3996
3997 return pval.intval;
3998}
3999
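/*
 * The current budget is reported to the USB power supply in microamps
 * (1000 * mA) via POWER_SUPPLY_PROP_SDP_CURRENT_MAX. For floating
 * chargers (POWER_SUPPLY_TYPE_USB_FLOAT) a 0 mA request is reported
 * as -ETIMEDOUT instead.
 */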
4000static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA)
4001{
4002 union power_supply_propval pval = {0};
4003 int ret, psy_type;
4004
Hemant Kumar006fae42017-07-12 18:11:25 -07004005 psy_type = get_psy_type(mdwc);
Vijayavardhan Vennapusac7f9b0f2017-10-03 14:44:52 +05304006 if (psy_type == POWER_SUPPLY_TYPE_USB_FLOAT) {
Sai Krishna Juturif236f3e2017-12-08 15:00:43 +05304007 if (!mA)
4008 pval.intval = -ETIMEDOUT;
4009 else
4010 pval.intval = 1000 * mA;
Vijayavardhan Vennapusac7f9b0f2017-10-03 14:44:52 +05304011 goto set_prop;
Hemant Kumard6bae052017-07-27 15:11:25 -07004012 }
Jack Pham8caff352016-08-19 16:33:55 -07004013
Vijayavardhan Vennapusac7f9b0f2017-10-03 14:44:52 +05304014 if (mdwc->max_power == mA || psy_type != POWER_SUPPLY_TYPE_USB)
4015 return 0;
4016
4017 dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA);
4018 /* Set max current limit in uA */
4019 pval.intval = 1000 * mA;
4020
4021set_prop:
Jack Phamd72bafe2016-08-09 11:07:22 -07004022 ret = power_supply_set_property(mdwc->usb_psy,
Nicholas Troast7f55c922017-07-25 13:18:03 -07004023 POWER_SUPPLY_PROP_SDP_CURRENT_MAX, &pval);
Jack Phamd72bafe2016-08-09 11:07:22 -07004024 if (ret) {
4025 dev_dbg(mdwc->dev, "power supply error when setting property\n");
4026 return ret;
4027 }
Mayank Rana511f3b22016-08-02 12:00:11 -07004028
4029 mdwc->max_power = mA;
4030 return 0;
Mayank Rana511f3b22016-08-02 12:00:11 -07004031}
4032
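/*
 * Summary of the OTG state transitions implemented below (derived from
 * the code):
 *	UNDEFINED    -> B_IDLE        after initial runtime PM setup
 *	B_IDLE       -> A_IDLE        ID grounded
 *	B_IDLE       -> B_PERIPHERAL  VBUS session valid
 *	B_PERIPHERAL <-> B_SUSPEND    bus suspend / resume
 *	B_PERIPHERAL -> B_IDLE        cable disconnect
 *	A_IDLE       -> A_HOST        host started
 *	A_HOST       -> B_IDLE        ID floated or host controller died
 */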
4033
4034/**
4035 * dwc3_otg_sm_work - workqueue function.
4036 *
4037 * @w: Pointer to the dwc3 otg workqueue
4038 *
4039 * NOTE: After any change in otg_state, we must reschedule the state machine.
4040 */
4041static void dwc3_otg_sm_work(struct work_struct *w)
4042{
4043 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, sm_work.work);
4044 struct dwc3 *dwc = NULL;
4045 bool work = 0;
4046 int ret = 0;
4047 unsigned long delay = 0;
4048 const char *state;
4049
4050 if (mdwc->dwc3)
4051 dwc = platform_get_drvdata(mdwc->dwc3);
4052
4053 if (!dwc) {
4054 dev_err(mdwc->dev, "dwc is NULL.\n");
4055 return;
4056 }
4057
4058 state = usb_otg_state_string(mdwc->otg_state);
4059 dev_dbg(mdwc->dev, "%s state\n", state);
Mayank Rana08e41922017-03-02 15:25:48 -08004060 dbg_event(0xFF, state, 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004061
4062 /* Check OTG state */
4063 switch (mdwc->otg_state) {
4064 case OTG_STATE_UNDEFINED:
Hemant Kumar8220a982017-01-19 18:11:34 -08004065 /* put controller and phy in suspend if no cable connected */
Mayank Rana511f3b22016-08-02 12:00:11 -07004066 if (test_bit(ID, &mdwc->inputs) &&
Hemant Kumar8220a982017-01-19 18:11:34 -08004067 !test_bit(B_SESS_VLD, &mdwc->inputs)) {
4068 dbg_event(0xFF, "undef_id_!bsv", 0);
4069 pm_runtime_set_active(mdwc->dev);
4070 pm_runtime_enable(mdwc->dev);
4071 pm_runtime_get_noresume(mdwc->dev);
4072 dwc3_msm_resume(mdwc);
4073 pm_runtime_put_sync(mdwc->dev);
4074 dbg_event(0xFF, "Undef NoUSB",
4075 atomic_read(&mdwc->dev->power.usage_count));
4076 mdwc->otg_state = OTG_STATE_B_IDLE;
Mayank Rana511f3b22016-08-02 12:00:11 -07004077 break;
Hemant Kumar8220a982017-01-19 18:11:34 -08004078 }
Mayank Rana511f3b22016-08-02 12:00:11 -07004079
Mayank Rana08e41922017-03-02 15:25:48 -08004080 dbg_event(0xFF, "Exit UNDEF", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004081 mdwc->otg_state = OTG_STATE_B_IDLE;
Hemant Kumar8220a982017-01-19 18:11:34 -08004082 pm_runtime_set_suspended(mdwc->dev);
4083 pm_runtime_enable(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07004084 /* fall-through */
4085 case OTG_STATE_B_IDLE:
4086 if (!test_bit(ID, &mdwc->inputs)) {
4087 dev_dbg(mdwc->dev, "!id\n");
4088 mdwc->otg_state = OTG_STATE_A_IDLE;
4089 work = 1;
4090 } else if (test_bit(B_SESS_VLD, &mdwc->inputs)) {
4091 dev_dbg(mdwc->dev, "b_sess_vld\n");
Hemant Kumar006fae42017-07-12 18:11:25 -07004092 if (get_psy_type(mdwc) == POWER_SUPPLY_TYPE_USB_FLOAT)
4093 queue_delayed_work(mdwc->dwc3_wq,
4094 &mdwc->sdp_check,
4095 msecs_to_jiffies(SDP_CONNETION_CHECK_TIME));
Mayank Rana511f3b22016-08-02 12:00:11 -07004096 /*
4097 * Increment pm usage count upon cable connect. Count
4098 * is decremented in OTG_STATE_B_PERIPHERAL state on
4099 * cable disconnect or in bus suspend.
4100 */
4101 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004102 dbg_event(0xFF, "BIDLE gsync",
4103 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07004104 dwc3_otg_start_peripheral(mdwc, 1);
4105 mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
4106 work = 1;
4107 } else {
4108 dwc3_msm_gadget_vbus_draw(mdwc, 0);
4109 dev_dbg(mdwc->dev, "Cable disconnected\n");
4110 }
4111 break;
4112
4113 case OTG_STATE_B_PERIPHERAL:
4114 if (!test_bit(B_SESS_VLD, &mdwc->inputs) ||
4115 !test_bit(ID, &mdwc->inputs)) {
4116 dev_dbg(mdwc->dev, "!id || !bsv\n");
4117 mdwc->otg_state = OTG_STATE_B_IDLE;
Hemant Kumar006fae42017-07-12 18:11:25 -07004118 cancel_delayed_work_sync(&mdwc->sdp_check);
Mayank Rana511f3b22016-08-02 12:00:11 -07004119 dwc3_otg_start_peripheral(mdwc, 0);
4120 /*
4121 * Decrement pm usage count upon cable disconnect
4122 * which was incremented upon cable connect in
4123 * OTG_STATE_B_IDLE state
4124 */
Mayank Ranace7ff8b62017-11-09 17:25:55 -08004125 pm_runtime_put_sync_suspend(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004126 dbg_event(0xFF, "!BSV psync",
4127 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07004128 work = 1;
4129 } else if (test_bit(B_SUSPEND, &mdwc->inputs) &&
4130 test_bit(B_SESS_VLD, &mdwc->inputs)) {
4131 dev_dbg(mdwc->dev, "BPER bsv && susp\n");
4132 mdwc->otg_state = OTG_STATE_B_SUSPEND;
4133 /*
4134 * Decrement pm usage count upon bus suspend.
4135 * Count was incremented either upon cable
4136 * connect in OTG_STATE_B_IDLE or host
4137 * initiated resume after bus suspend in
4138 * OTG_STATE_B_SUSPEND state
4139 */
4140 pm_runtime_mark_last_busy(mdwc->dev);
4141 pm_runtime_put_autosuspend(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004142 dbg_event(0xFF, "SUSP put",
4143 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07004144 }
4145 break;
4146
4147 case OTG_STATE_B_SUSPEND:
4148 if (!test_bit(B_SESS_VLD, &mdwc->inputs)) {
4149 dev_dbg(mdwc->dev, "BSUSP: !bsv\n");
4150 mdwc->otg_state = OTG_STATE_B_IDLE;
Hemant Kumar006fae42017-07-12 18:11:25 -07004151 cancel_delayed_work_sync(&mdwc->sdp_check);
Mayank Rana511f3b22016-08-02 12:00:11 -07004152 dwc3_otg_start_peripheral(mdwc, 0);
4153 } else if (!test_bit(B_SUSPEND, &mdwc->inputs)) {
4154 dev_dbg(mdwc->dev, "BSUSP !susp\n");
4155 mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
4156 /*
4157 * Increment pm usage count upon host
4158 * initiated resume. Count was decremented
4159 * upon bus suspend in
4160 * OTG_STATE_B_PERIPHERAL state.
4161 */
4162 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004163 dbg_event(0xFF, "!SUSP gsync",
4164 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07004165 }
4166 break;
4167
4168 case OTG_STATE_A_IDLE:
4169 /* Switch to A-Device*/
4170 if (test_bit(ID, &mdwc->inputs)) {
4171 dev_dbg(mdwc->dev, "id\n");
4172 mdwc->otg_state = OTG_STATE_B_IDLE;
4173 mdwc->vbus_retry_count = 0;
4174 work = 1;
4175 } else {
4176 mdwc->otg_state = OTG_STATE_A_HOST;
4177 ret = dwc3_otg_start_host(mdwc, 1);
4178 if ((ret == -EPROBE_DEFER) &&
4179 mdwc->vbus_retry_count < 3) {
4180 /*
4181				 * Regulator get failed because the regulator
4182				 * driver is not up yet. Retry starting the
				 * host after 1 second.
4183 */
4184 mdwc->otg_state = OTG_STATE_A_IDLE;
4185 dev_dbg(mdwc->dev, "Unable to get vbus regulator. Retrying...\n");
4186 delay = VBUS_REG_CHECK_DELAY;
4187 work = 1;
4188 mdwc->vbus_retry_count++;
4189 } else if (ret) {
4190 dev_err(mdwc->dev, "unable to start host\n");
4191 mdwc->otg_state = OTG_STATE_A_IDLE;
4192 goto ret;
4193 }
4194 }
4195 break;
4196
4197 case OTG_STATE_A_HOST:
Manu Gautam976fdfc2016-08-18 09:27:35 +05304198 if (test_bit(ID, &mdwc->inputs) || mdwc->hc_died) {
4199 dev_dbg(mdwc->dev, "id || hc_died\n");
Mayank Rana511f3b22016-08-02 12:00:11 -07004200 dwc3_otg_start_host(mdwc, 0);
4201 mdwc->otg_state = OTG_STATE_B_IDLE;
4202 mdwc->vbus_retry_count = 0;
Manu Gautam976fdfc2016-08-18 09:27:35 +05304203 mdwc->hc_died = false;
Mayank Rana511f3b22016-08-02 12:00:11 -07004204 work = 1;
4205 } else {
4206 dev_dbg(mdwc->dev, "still in a_host state. Resuming root hub.\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004207 dbg_event(0xFF, "XHCIResume", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004208 if (dwc)
4209 pm_runtime_resume(&dwc->xhci->dev);
4210 }
4211 break;
4212
4213 default:
4214 dev_err(mdwc->dev, "%s: invalid otg-state\n", __func__);
4215
4216 }
4217
4218 if (work)
4219 schedule_delayed_work(&mdwc->sm_work, delay);
4220
4221ret:
4222 return;
4223}
4224
4225#ifdef CONFIG_PM_SLEEP
4226static int dwc3_msm_pm_suspend(struct device *dev)
4227{
4228 int ret = 0;
4229 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
4230 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
4231
4232 dev_dbg(dev, "dwc3-msm PM suspend\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004233 dbg_event(0xFF, "PM Sus", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004234
4235 flush_workqueue(mdwc->dwc3_wq);
4236 if (!atomic_read(&dwc->in_lpm)) {
4237 dev_err(mdwc->dev, "Abort PM suspend!! (USB is outside LPM)\n");
4238 return -EBUSY;
4239 }
4240
4241 ret = dwc3_msm_suspend(mdwc);
4242 if (!ret)
4243 atomic_set(&mdwc->pm_suspended, 1);
4244
4245 return ret;
4246}
4247
4248static int dwc3_msm_pm_resume(struct device *dev)
4249{
4250 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004251 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07004252
4253 dev_dbg(dev, "dwc3-msm PM resume\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004254 dbg_event(0xFF, "PM Res", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004255
Mayank Rana511f3b22016-08-02 12:00:11 -07004256 /* flush to avoid race in read/write of pm_suspended */
4257 flush_workqueue(mdwc->dwc3_wq);
4258 atomic_set(&mdwc->pm_suspended, 0);
4259
4260 /* kick in otg state machine */
4261 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
4262
4263 return 0;
4264}
4265#endif
4266
4267#ifdef CONFIG_PM
4268static int dwc3_msm_runtime_idle(struct device *dev)
4269{
Mayank Rana08e41922017-03-02 15:25:48 -08004270 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
4271 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
4272
Mayank Rana511f3b22016-08-02 12:00:11 -07004273 dev_dbg(dev, "DWC3-msm runtime idle\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004274 dbg_event(0xFF, "RT Idle", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004275
4276 return 0;
4277}
4278
4279static int dwc3_msm_runtime_suspend(struct device *dev)
4280{
4281 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004282 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07004283
4284 dev_dbg(dev, "DWC3-msm runtime suspend\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004285 dbg_event(0xFF, "RT Sus", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004286
4287 return dwc3_msm_suspend(mdwc);
4288}
4289
4290static int dwc3_msm_runtime_resume(struct device *dev)
4291{
4292 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004293 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07004294
4295 dev_dbg(dev, "DWC3-msm runtime resume\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004296 dbg_event(0xFF, "RT Res", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004297
4298 return dwc3_msm_resume(mdwc);
4299}
4300#endif
4301
4302static const struct dev_pm_ops dwc3_msm_dev_pm_ops = {
4303 SET_SYSTEM_SLEEP_PM_OPS(dwc3_msm_pm_suspend, dwc3_msm_pm_resume)
4304 SET_RUNTIME_PM_OPS(dwc3_msm_runtime_suspend, dwc3_msm_runtime_resume,
4305 dwc3_msm_runtime_idle)
4306};
4307
4308static const struct of_device_id of_dwc3_match[] = {
4309 {
4310 .compatible = "qcom,dwc-usb3-msm",
4311 },
4312 { },
4313};
4314MODULE_DEVICE_TABLE(of, of_dwc3_match);
4315
4316static struct platform_driver dwc3_msm_driver = {
4317 .probe = dwc3_msm_probe,
4318 .remove = dwc3_msm_remove,
4319 .driver = {
4320 .name = "msm-dwc3",
4321 .pm = &dwc3_msm_dev_pm_ops,
4322		.of_match_table = of_dwc3_match,
4323 },
4324};
4325
4326MODULE_LICENSE("GPL v2");
4327MODULE_DESCRIPTION("DesignWare USB3 MSM Glue Layer");
4328
4329static int dwc3_msm_init(void)
4330{
4331 return platform_driver_register(&dwc3_msm_driver);
4332}
4333module_init(dwc3_msm_init);
4334
4335static void __exit dwc3_msm_exit(void)
4336{
4337 platform_driver_unregister(&dwc3_msm_driver);
4338}
4339module_exit(dwc3_msm_exit);