blob: 0dc81d2b4ec47d9ce9424d789a92420c8663877c [file] [log] [blame]
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08001/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Mayank Rana511f3b22016-08-02 12:00:11 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/slab.h>
17#include <linux/cpu.h>
18#include <linux/platform_device.h>
19#include <linux/dma-mapping.h>
20#include <linux/dmapool.h>
21#include <linux/pm_runtime.h>
22#include <linux/ratelimit.h>
23#include <linux/interrupt.h>
Jack Phambbe27962017-03-23 18:42:26 -070024#include <asm/dma-iommu.h>
25#include <linux/iommu.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070026#include <linux/ioport.h>
27#include <linux/clk.h>
28#include <linux/io.h>
29#include <linux/module.h>
30#include <linux/types.h>
31#include <linux/delay.h>
32#include <linux/of.h>
33#include <linux/of_platform.h>
34#include <linux/of_gpio.h>
35#include <linux/list.h>
36#include <linux/uaccess.h>
37#include <linux/usb/ch9.h>
38#include <linux/usb/gadget.h>
39#include <linux/usb/of.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070040#include <linux/regulator/consumer.h>
41#include <linux/pm_wakeup.h>
42#include <linux/power_supply.h>
43#include <linux/cdev.h>
44#include <linux/completion.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070045#include <linux/msm-bus.h>
46#include <linux/irq.h>
47#include <linux/extcon.h>
Amit Nischal4d278212016-06-06 17:54:34 +053048#include <linux/reset.h>
Hemant Kumar633dc332016-08-10 13:41:05 -070049#include <linux/clk/qcom.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070050
51#include "power.h"
52#include "core.h"
53#include "gadget.h"
54#include "dbm.h"
55#include "debug.h"
56#include "xhci.h"
57
58/* time out to wait for USB cable status notification (in ms)*/
59#define SM_INIT_TIMEOUT 30000
60
61/* AHB2PHY register offsets */
62#define PERIPH_SS_AHB2PHY_TOP_CFG 0x10
63
64/* AHB2PHY read/write waite value */
65#define ONE_READ_WRITE_WAIT 0x11
66
67/* cpu to fix usb interrupt */
68static int cpu_to_affin;
69module_param(cpu_to_affin, int, S_IRUGO|S_IWUSR);
70MODULE_PARM_DESC(cpu_to_affin, "affin usb irq to this cpu");
71
Mayank Ranaf70d8212017-06-12 14:02:07 -070072/* override for USB speed */
73static int override_usb_speed;
74module_param(override_usb_speed, int, 0644);
75MODULE_PARM_DESC(override_usb_speed, "override for USB speed");
76
Mayank Rana511f3b22016-08-02 12:00:11 -070077/* XHCI registers */
78#define USB3_HCSPARAMS1 (0x4)
79#define USB3_PORTSC (0x420)
80
81/**
82 * USB QSCRATCH Hardware registers
83 *
84 */
85#define QSCRATCH_REG_OFFSET (0x000F8800)
86#define QSCRATCH_GENERAL_CFG (QSCRATCH_REG_OFFSET + 0x08)
87#define CGCTL_REG (QSCRATCH_REG_OFFSET + 0x28)
88#define PWR_EVNT_IRQ_STAT_REG (QSCRATCH_REG_OFFSET + 0x58)
89#define PWR_EVNT_IRQ_MASK_REG (QSCRATCH_REG_OFFSET + 0x5C)
90
91#define PWR_EVNT_POWERDOWN_IN_P3_MASK BIT(2)
92#define PWR_EVNT_POWERDOWN_OUT_P3_MASK BIT(3)
93#define PWR_EVNT_LPM_IN_L2_MASK BIT(4)
94#define PWR_EVNT_LPM_OUT_L2_MASK BIT(5)
95#define PWR_EVNT_LPM_OUT_L1_MASK BIT(13)
96
97/* QSCRATCH_GENERAL_CFG register bit offset */
98#define PIPE_UTMI_CLK_SEL BIT(0)
99#define PIPE3_PHYSTATUS_SW BIT(3)
100#define PIPE_UTMI_CLK_DIS BIT(8)
101
102#define HS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x10)
103#define UTMI_OTG_VBUS_VALID BIT(20)
104#define SW_SESSVLD_SEL BIT(28)
105
106#define SS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x30)
107#define LANE0_PWR_PRESENT BIT(24)
108
109/* GSI related registers */
110#define GSI_TRB_ADDR_BIT_53_MASK (1 << 21)
111#define GSI_TRB_ADDR_BIT_55_MASK (1 << 23)
112
113#define GSI_GENERAL_CFG_REG (QSCRATCH_REG_OFFSET + 0xFC)
114#define GSI_RESTART_DBL_PNTR_MASK BIT(20)
115#define GSI_CLK_EN_MASK BIT(12)
116#define BLOCK_GSI_WR_GO_MASK BIT(1)
117#define GSI_EN_MASK BIT(0)
118
119#define GSI_DBL_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x110) + (n*4))
120#define GSI_DBL_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x120) + (n*4))
121#define GSI_RING_BASE_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x130) + (n*4))
122#define GSI_RING_BASE_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x140) + (n*4))
123
124#define GSI_IF_STS (QSCRATCH_REG_OFFSET + 0x1A4)
125#define GSI_WR_CTRL_STATE_MASK BIT(15)
126
Mayank Ranaf4918d32016-12-15 13:35:55 -0800127#define DWC3_GEVNTCOUNT_EVNTINTRPTMASK (1 << 31)
128#define DWC3_GEVNTADRHI_EVNTADRHI_GSI_EN(n) (n << 22)
129#define DWC3_GEVNTADRHI_EVNTADRHI_GSI_IDX(n) (n << 16)
130#define DWC3_GEVENT_TYPE_GSI 0x3
131
Mayank Rana511f3b22016-08-02 12:00:11 -0700132struct dwc3_msm_req_complete {
133 struct list_head list_item;
134 struct usb_request *req;
135 void (*orig_complete)(struct usb_ep *ep,
136 struct usb_request *req);
137};
138
139enum dwc3_id_state {
140 DWC3_ID_GROUND = 0,
141 DWC3_ID_FLOAT,
142};
143
144/* for type c cable */
145enum plug_orientation {
146 ORIENTATION_NONE,
147 ORIENTATION_CC1,
148 ORIENTATION_CC2,
149};
150
Mayank Ranad339abe2017-05-31 09:19:49 -0700151enum msm_usb_irq {
152 HS_PHY_IRQ,
153 PWR_EVNT_IRQ,
154 DP_HS_PHY_IRQ,
155 DM_HS_PHY_IRQ,
156 SS_PHY_IRQ,
157 USB_MAX_IRQ
158};
159
160struct usb_irq {
161 char *name;
162 int irq;
163 bool enable;
164};
165
166static const struct usb_irq usb_irq_info[USB_MAX_IRQ] = {
167 {"hs_phy_irq", 0},
168 {"pwr_event_irq", 0},
169 {"dp_hs_phy_irq", 0},
170 {"dm_hs_phy_irq", 0},
171 {"ss_phy_irq", 0},
172};
173
Mayank Rana511f3b22016-08-02 12:00:11 -0700174/* Input bits to state machine (mdwc->inputs) */
175
176#define ID 0
177#define B_SESS_VLD 1
178#define B_SUSPEND 2
179
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +0530180#define PM_QOS_SAMPLE_SEC 2
181#define PM_QOS_THRESHOLD 400
182
Mayank Rana511f3b22016-08-02 12:00:11 -0700183struct dwc3_msm {
184 struct device *dev;
185 void __iomem *base;
186 void __iomem *ahb2phy_base;
187 struct platform_device *dwc3;
Jack Phambbe27962017-03-23 18:42:26 -0700188 struct dma_iommu_mapping *iommu_map;
Mayank Rana511f3b22016-08-02 12:00:11 -0700189 const struct usb_ep_ops *original_ep_ops[DWC3_ENDPOINTS_NUM];
190 struct list_head req_complete_list;
191 struct clk *xo_clk;
192 struct clk *core_clk;
193 long core_clk_rate;
Hemant Kumar8e4c2f22017-01-24 18:13:07 -0800194 long core_clk_rate_hs;
Mayank Rana511f3b22016-08-02 12:00:11 -0700195 struct clk *iface_clk;
196 struct clk *sleep_clk;
197 struct clk *utmi_clk;
198 unsigned int utmi_clk_rate;
199 struct clk *utmi_clk_src;
200 struct clk *bus_aggr_clk;
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +0530201 struct clk *noc_aggr_clk;
Mayank Rana511f3b22016-08-02 12:00:11 -0700202 struct clk *cfg_ahb_clk;
Amit Nischal4d278212016-06-06 17:54:34 +0530203 struct reset_control *core_reset;
Mayank Rana511f3b22016-08-02 12:00:11 -0700204 struct regulator *dwc3_gdsc;
205
206 struct usb_phy *hs_phy, *ss_phy;
207
208 struct dbm *dbm;
209
210 /* VBUS regulator for host mode */
211 struct regulator *vbus_reg;
212 int vbus_retry_count;
213 bool resume_pending;
214 atomic_t pm_suspended;
Mayank Ranad339abe2017-05-31 09:19:49 -0700215 struct usb_irq wakeup_irq[USB_MAX_IRQ];
Mayank Rana511f3b22016-08-02 12:00:11 -0700216 struct work_struct resume_work;
217 struct work_struct restart_usb_work;
218 bool in_restart;
219 struct workqueue_struct *dwc3_wq;
220 struct delayed_work sm_work;
221 unsigned long inputs;
222 unsigned int max_power;
223 bool charging_disabled;
224 enum usb_otg_state otg_state;
Mayank Rana511f3b22016-08-02 12:00:11 -0700225 u32 bus_perf_client;
226 struct msm_bus_scale_pdata *bus_scale_table;
227 struct power_supply *usb_psy;
Jack Pham4b8b4ae2016-08-09 11:36:34 -0700228 struct work_struct vbus_draw_work;
Mayank Rana511f3b22016-08-02 12:00:11 -0700229 bool in_host_mode;
Hemant Kumar8e4c2f22017-01-24 18:13:07 -0800230 enum usb_device_speed max_rh_port_speed;
Mayank Rana511f3b22016-08-02 12:00:11 -0700231 unsigned int tx_fifo_size;
232 bool vbus_active;
233 bool suspend;
234 bool disable_host_mode_pm;
Mayank Ranad339abe2017-05-31 09:19:49 -0700235 bool use_pdc_interrupts;
Mayank Rana511f3b22016-08-02 12:00:11 -0700236 enum dwc3_id_state id_state;
237 unsigned long lpm_flags;
238#define MDWC3_SS_PHY_SUSPEND BIT(0)
239#define MDWC3_ASYNC_IRQ_WAKE_CAPABILITY BIT(1)
240#define MDWC3_POWER_COLLAPSE BIT(2)
241
242 unsigned int irq_to_affin;
243 struct notifier_block dwc3_cpu_notifier;
Manu Gautam976fdfc2016-08-18 09:27:35 +0530244 struct notifier_block usbdev_nb;
245 bool hc_died;
Mayank Rana511f3b22016-08-02 12:00:11 -0700246
247 struct extcon_dev *extcon_vbus;
248 struct extcon_dev *extcon_id;
Mayank Rana51958172017-02-28 14:49:21 -0800249 struct extcon_dev *extcon_eud;
Mayank Rana511f3b22016-08-02 12:00:11 -0700250 struct notifier_block vbus_nb;
251 struct notifier_block id_nb;
Mayank Rana51958172017-02-28 14:49:21 -0800252 struct notifier_block eud_event_nb;
Mayank Rana511f3b22016-08-02 12:00:11 -0700253
Jack Pham4d4e9342016-12-07 19:25:02 -0800254 struct notifier_block host_nb;
255
Mayank Rana511f3b22016-08-02 12:00:11 -0700256 atomic_t in_p3;
257 unsigned int lpm_to_suspend_delay;
258 bool init;
259 enum plug_orientation typec_orientation;
Mayank Ranaf4918d32016-12-15 13:35:55 -0800260 u32 num_gsi_event_buffers;
261 struct dwc3_event_buffer **gsi_ev_buff;
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +0530262 int pm_qos_latency;
263 struct pm_qos_request pm_qos_req_dma;
264 struct delayed_work perf_vote_work;
Mayank Rana511f3b22016-08-02 12:00:11 -0700265};
266
267#define USB_HSPHY_3P3_VOL_MIN 3050000 /* uV */
268#define USB_HSPHY_3P3_VOL_MAX 3300000 /* uV */
269#define USB_HSPHY_3P3_HPM_LOAD 16000 /* uA */
270
271#define USB_HSPHY_1P8_VOL_MIN 1800000 /* uV */
272#define USB_HSPHY_1P8_VOL_MAX 1800000 /* uV */
273#define USB_HSPHY_1P8_HPM_LOAD 19000 /* uA */
274
275#define USB_SSPHY_1P8_VOL_MIN 1800000 /* uV */
276#define USB_SSPHY_1P8_VOL_MAX 1800000 /* uV */
277#define USB_SSPHY_1P8_HPM_LOAD 23000 /* uA */
278
279#define DSTS_CONNECTSPD_SS 0x4
280
281
282static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc);
283static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA);
Mayank Ranaf4918d32016-12-15 13:35:55 -0800284static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event);
Mayank Ranaf70d8212017-06-12 14:02:07 -0700285
286static inline bool is_valid_usb_speed(struct dwc3 *dwc, int speed)
287{
288
289 return (((speed == USB_SPEED_FULL) || (speed == USB_SPEED_HIGH) ||
290 (speed == USB_SPEED_SUPER) || (speed == USB_SPEED_SUPER_PLUS))
291 && (speed <= dwc->maximum_speed));
292}
293
Mayank Rana511f3b22016-08-02 12:00:11 -0700294/**
295 *
296 * Read register with debug info.
297 *
298 * @base - DWC3 base virtual address.
299 * @offset - register offset.
300 *
301 * @return u32
302 */
Stephen Boyda247bae2017-06-15 14:09:08 -0700303static inline u32 dwc3_msm_read_reg(void __iomem *base, u32 offset)
Mayank Rana511f3b22016-08-02 12:00:11 -0700304{
305 u32 val = ioread32(base + offset);
306 return val;
307}
308
309/**
310 * Read register masked field with debug info.
311 *
312 * @base - DWC3 base virtual address.
313 * @offset - register offset.
314 * @mask - register bitmask.
315 *
316 * @return u32
317 */
Stephen Boyda247bae2017-06-15 14:09:08 -0700318static inline u32 dwc3_msm_read_reg_field(void __iomem *base,
Mayank Rana511f3b22016-08-02 12:00:11 -0700319 u32 offset,
320 const u32 mask)
321{
Mayank Ranad796cab2017-07-11 15:34:12 -0700322 u32 shift = __ffs(mask);
Mayank Rana511f3b22016-08-02 12:00:11 -0700323 u32 val = ioread32(base + offset);
324
325 val &= mask; /* clear other bits */
326 val >>= shift;
327 return val;
328}
329
330/**
331 *
332 * Write register with debug info.
333 *
334 * @base - DWC3 base virtual address.
335 * @offset - register offset.
336 * @val - value to write.
337 *
338 */
Stephen Boyda247bae2017-06-15 14:09:08 -0700339static inline void dwc3_msm_write_reg(void __iomem *base, u32 offset, u32 val)
Mayank Rana511f3b22016-08-02 12:00:11 -0700340{
341 iowrite32(val, base + offset);
342}
343
344/**
345 * Write register masked field with debug info.
346 *
347 * @base - DWC3 base virtual address.
348 * @offset - register offset.
349 * @mask - register bitmask.
350 * @val - value to write.
351 *
352 */
Stephen Boyda247bae2017-06-15 14:09:08 -0700353static inline void dwc3_msm_write_reg_field(void __iomem *base, u32 offset,
Mayank Rana511f3b22016-08-02 12:00:11 -0700354 const u32 mask, u32 val)
355{
Mayank Ranad796cab2017-07-11 15:34:12 -0700356 u32 shift = __ffs(mask);
Mayank Rana511f3b22016-08-02 12:00:11 -0700357 u32 tmp = ioread32(base + offset);
358
359 tmp &= ~mask; /* clear written bits */
360 val = tmp | (val << shift);
361 iowrite32(val, base + offset);
362}
363
364/**
365 * Write register and read back masked value to confirm it is written
366 *
367 * @base - DWC3 base virtual address.
368 * @offset - register offset.
369 * @mask - register bitmask specifying what should be updated
370 * @val - value to write.
371 *
372 */
Stephen Boyda247bae2017-06-15 14:09:08 -0700373static inline void dwc3_msm_write_readback(void __iomem *base, u32 offset,
Mayank Rana511f3b22016-08-02 12:00:11 -0700374 const u32 mask, u32 val)
375{
376 u32 write_val, tmp = ioread32(base + offset);
377
378 tmp &= ~mask; /* retain other bits */
379 write_val = tmp | val;
380
381 iowrite32(write_val, base + offset);
382
383 /* Read back to see if val was written */
384 tmp = ioread32(base + offset);
385 tmp &= mask; /* clear other bits */
386
387 if (tmp != val)
388 pr_err("%s: write: %x to QSCRATCH: %x FAILED\n",
389 __func__, val, offset);
390}
391
Hemant Kumar8e4c2f22017-01-24 18:13:07 -0800392static bool dwc3_msm_is_ss_rhport_connected(struct dwc3_msm *mdwc)
393{
394 int i, num_ports;
395 u32 reg;
396
397 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
398 num_ports = HCS_MAX_PORTS(reg);
399
400 for (i = 0; i < num_ports; i++) {
401 reg = dwc3_msm_read_reg(mdwc->base, USB3_PORTSC + i*0x10);
402 if ((reg & PORT_CONNECT) && DEV_SUPERSPEED(reg))
403 return true;
404 }
405
406 return false;
407}
408
Mayank Rana511f3b22016-08-02 12:00:11 -0700409static bool dwc3_msm_is_host_superspeed(struct dwc3_msm *mdwc)
410{
411 int i, num_ports;
412 u32 reg;
413
414 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
415 num_ports = HCS_MAX_PORTS(reg);
416
417 for (i = 0; i < num_ports; i++) {
418 reg = dwc3_msm_read_reg(mdwc->base, USB3_PORTSC + i*0x10);
419 if ((reg & PORT_PE) && DEV_SUPERSPEED(reg))
420 return true;
421 }
422
423 return false;
424}
425
426static inline bool dwc3_msm_is_dev_superspeed(struct dwc3_msm *mdwc)
427{
428 u8 speed;
429
430 speed = dwc3_msm_read_reg(mdwc->base, DWC3_DSTS) & DWC3_DSTS_CONNECTSPD;
431 return !!(speed & DSTS_CONNECTSPD_SS);
432}
433
434static inline bool dwc3_msm_is_superspeed(struct dwc3_msm *mdwc)
435{
436 if (mdwc->in_host_mode)
437 return dwc3_msm_is_host_superspeed(mdwc);
438
439 return dwc3_msm_is_dev_superspeed(mdwc);
440}
441
442#if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
443/**
444 * Configure the DBM with the BAM's data fifo.
445 * This function is called by the USB BAM Driver
446 * upon initialization.
447 *
448 * @ep - pointer to usb endpoint.
449 * @addr - address of data fifo.
450 * @size - size of data fifo.
451 *
452 */
453int msm_data_fifo_config(struct usb_ep *ep, phys_addr_t addr,
454 u32 size, u8 dst_pipe_idx)
455{
456 struct dwc3_ep *dep = to_dwc3_ep(ep);
457 struct dwc3 *dwc = dep->dwc;
458 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
459
460 dev_dbg(mdwc->dev, "%s\n", __func__);
461
462 return dbm_data_fifo_config(mdwc->dbm, dep->number, addr, size,
463 dst_pipe_idx);
464}
465
466
467/**
468* Cleanups for msm endpoint on request complete.
469*
470* Also call original request complete.
471*
472* @usb_ep - pointer to usb_ep instance.
473* @request - pointer to usb_request instance.
474*
475* @return int - 0 on success, negative on error.
476*/
477static void dwc3_msm_req_complete_func(struct usb_ep *ep,
478 struct usb_request *request)
479{
480 struct dwc3_ep *dep = to_dwc3_ep(ep);
481 struct dwc3 *dwc = dep->dwc;
482 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
483 struct dwc3_msm_req_complete *req_complete = NULL;
484
485 /* Find original request complete function and remove it from list */
486 list_for_each_entry(req_complete, &mdwc->req_complete_list, list_item) {
487 if (req_complete->req == request)
488 break;
489 }
490 if (!req_complete || req_complete->req != request) {
491 dev_err(dep->dwc->dev, "%s: could not find the request\n",
492 __func__);
493 return;
494 }
495 list_del(&req_complete->list_item);
496
497 /*
498 * Release another one TRB to the pool since DBM queue took 2 TRBs
499 * (normal and link), and the dwc3/gadget.c :: dwc3_gadget_giveback
500 * released only one.
501 */
Mayank Rana83ad5822016-08-09 14:17:22 -0700502 dep->trb_dequeue++;
Mayank Rana511f3b22016-08-02 12:00:11 -0700503
504 /* Unconfigure dbm ep */
505 dbm_ep_unconfig(mdwc->dbm, dep->number);
506
507 /*
508 * If this is the last endpoint we unconfigured, than reset also
509 * the event buffers; unless unconfiguring the ep due to lpm,
510 * in which case the event buffer only gets reset during the
511 * block reset.
512 */
513 if (dbm_get_num_of_eps_configured(mdwc->dbm) == 0 &&
514 !dbm_reset_ep_after_lpm(mdwc->dbm))
515 dbm_event_buffer_config(mdwc->dbm, 0, 0, 0);
516
517 /*
518 * Call original complete function, notice that dwc->lock is already
519 * taken by the caller of this function (dwc3_gadget_giveback()).
520 */
521 request->complete = req_complete->orig_complete;
522 if (request->complete)
523 request->complete(ep, request);
524
525 kfree(req_complete);
526}
527
528
529/**
530* Helper function
531*
532* Reset DBM endpoint.
533*
534* @mdwc - pointer to dwc3_msm instance.
535* @dep - pointer to dwc3_ep instance.
536*
537* @return int - 0 on success, negative on error.
538*/
539static int __dwc3_msm_dbm_ep_reset(struct dwc3_msm *mdwc, struct dwc3_ep *dep)
540{
541 int ret;
542
543 dev_dbg(mdwc->dev, "Resetting dbm endpoint %d\n", dep->number);
544
545 /* Reset the dbm endpoint */
546 ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, true);
547 if (ret) {
548 dev_err(mdwc->dev, "%s: failed to assert dbm ep reset\n",
549 __func__);
550 return ret;
551 }
552
553 /*
554 * The necessary delay between asserting and deasserting the dbm ep
555 * reset is based on the number of active endpoints. If there is more
556 * than one endpoint, a 1 msec delay is required. Otherwise, a shorter
557 * delay will suffice.
558 */
559 if (dbm_get_num_of_eps_configured(mdwc->dbm) > 1)
560 usleep_range(1000, 1200);
561 else
562 udelay(10);
563 ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, false);
564 if (ret) {
565 dev_err(mdwc->dev, "%s: failed to deassert dbm ep reset\n",
566 __func__);
567 return ret;
568 }
569
570 return 0;
571}
572
573/**
574* Reset the DBM endpoint which is linked to the given USB endpoint.
575*
576* @usb_ep - pointer to usb_ep instance.
577*
578* @return int - 0 on success, negative on error.
579*/
580
581int msm_dwc3_reset_dbm_ep(struct usb_ep *ep)
582{
583 struct dwc3_ep *dep = to_dwc3_ep(ep);
584 struct dwc3 *dwc = dep->dwc;
585 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
586
587 return __dwc3_msm_dbm_ep_reset(mdwc, dep);
588}
589EXPORT_SYMBOL(msm_dwc3_reset_dbm_ep);
590
591
592/**
593* Helper function.
594* See the header of the dwc3_msm_ep_queue function.
595*
596* @dwc3_ep - pointer to dwc3_ep instance.
597* @req - pointer to dwc3_request instance.
598*
599* @return int - 0 on success, negative on error.
600*/
601static int __dwc3_msm_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
602{
603 struct dwc3_trb *trb;
604 struct dwc3_trb *trb_link;
605 struct dwc3_gadget_ep_cmd_params params;
606 u32 cmd;
607 int ret = 0;
608
Mayank Rana83ad5822016-08-09 14:17:22 -0700609 /* We push the request to the dep->started_list list to indicate that
Mayank Rana511f3b22016-08-02 12:00:11 -0700610 * this request is issued with start transfer. The request will be out
611 * from this list in 2 cases. The first is that the transfer will be
612 * completed (not if the transfer is endless using a circular TRBs with
613 * with link TRB). The second case is an option to do stop stransfer,
614 * this can be initiated by the function driver when calling dequeue.
615 */
Mayank Rana83ad5822016-08-09 14:17:22 -0700616 req->started = true;
617 list_add_tail(&req->list, &dep->started_list);
Mayank Rana511f3b22016-08-02 12:00:11 -0700618
619 /* First, prepare a normal TRB, point to the fake buffer */
Mayank Rana9ca186c2017-06-19 17:57:21 -0700620 trb = &dep->trb_pool[dep->trb_enqueue];
621 dwc3_ep_inc_enq(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700622 memset(trb, 0, sizeof(*trb));
623
624 req->trb = trb;
625 trb->bph = DBM_TRB_BIT | DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
626 trb->size = DWC3_TRB_SIZE_LENGTH(req->request.length);
627 trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_HWO |
628 DWC3_TRB_CTRL_CHN | (req->direction ? 0 : DWC3_TRB_CTRL_CSP);
629 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
630
631 /* Second, prepare a Link TRB that points to the first TRB*/
Mayank Rana9ca186c2017-06-19 17:57:21 -0700632 trb_link = &dep->trb_pool[dep->trb_enqueue];
633 dwc3_ep_inc_enq(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700634 memset(trb_link, 0, sizeof(*trb_link));
635
636 trb_link->bpl = lower_32_bits(req->trb_dma);
637 trb_link->bph = DBM_TRB_BIT |
638 DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
639 trb_link->size = 0;
640 trb_link->ctrl = DWC3_TRBCTL_LINK_TRB | DWC3_TRB_CTRL_HWO;
641
642 /*
643 * Now start the transfer
644 */
645 memset(&params, 0, sizeof(params));
646 params.param0 = 0; /* TDAddr High */
647 params.param1 = lower_32_bits(req->trb_dma); /* DAddr Low */
648
649 /* DBM requires IOC to be set */
650 cmd = DWC3_DEPCMD_STARTTRANSFER | DWC3_DEPCMD_CMDIOC;
Mayank Rana83ad5822016-08-09 14:17:22 -0700651 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -0700652 if (ret < 0) {
653 dev_dbg(dep->dwc->dev,
654 "%s: failed to send STARTTRANSFER command\n",
655 __func__);
656
657 list_del(&req->list);
658 return ret;
659 }
660 dep->flags |= DWC3_EP_BUSY;
Mayank Rana83ad5822016-08-09 14:17:22 -0700661 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700662
663 return ret;
664}
665
666/**
667* Queue a usb request to the DBM endpoint.
668* This function should be called after the endpoint
669* was enabled by the ep_enable.
670*
671* This function prepares special structure of TRBs which
672* is familiar with the DBM HW, so it will possible to use
673* this endpoint in DBM mode.
674*
675* The TRBs prepared by this function, is one normal TRB
676* which point to a fake buffer, followed by a link TRB
677* that points to the first TRB.
678*
679* The API of this function follow the regular API of
680* usb_ep_queue (see usb_ep_ops in include/linuk/usb/gadget.h).
681*
682* @usb_ep - pointer to usb_ep instance.
683* @request - pointer to usb_request instance.
684* @gfp_flags - possible flags.
685*
686* @return int - 0 on success, negative on error.
687*/
688static int dwc3_msm_ep_queue(struct usb_ep *ep,
689 struct usb_request *request, gfp_t gfp_flags)
690{
691 struct dwc3_request *req = to_dwc3_request(request);
692 struct dwc3_ep *dep = to_dwc3_ep(ep);
693 struct dwc3 *dwc = dep->dwc;
694 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
695 struct dwc3_msm_req_complete *req_complete;
696 unsigned long flags;
697 int ret = 0, size;
698 u8 bam_pipe;
699 bool producer;
700 bool disable_wb;
701 bool internal_mem;
702 bool ioc;
703 bool superspeed;
704
705 if (!(request->udc_priv & MSM_SPS_MODE)) {
706 /* Not SPS mode, call original queue */
707 dev_vdbg(mdwc->dev, "%s: not sps mode, use regular queue\n",
708 __func__);
709
710 return (mdwc->original_ep_ops[dep->number])->queue(ep,
711 request,
712 gfp_flags);
713 }
714
715 /* HW restriction regarding TRB size (8KB) */
716 if (req->request.length < 0x2000) {
717 dev_err(mdwc->dev, "%s: Min TRB size is 8KB\n", __func__);
718 return -EINVAL;
719 }
720
721 /*
722 * Override req->complete function, but before doing that,
723 * store it's original pointer in the req_complete_list.
724 */
725 req_complete = kzalloc(sizeof(*req_complete), gfp_flags);
726 if (!req_complete)
727 return -ENOMEM;
728
729 req_complete->req = request;
730 req_complete->orig_complete = request->complete;
731 list_add_tail(&req_complete->list_item, &mdwc->req_complete_list);
732 request->complete = dwc3_msm_req_complete_func;
733
734 /*
735 * Configure the DBM endpoint
736 */
737 bam_pipe = request->udc_priv & MSM_PIPE_ID_MASK;
738 producer = ((request->udc_priv & MSM_PRODUCER) ? true : false);
739 disable_wb = ((request->udc_priv & MSM_DISABLE_WB) ? true : false);
740 internal_mem = ((request->udc_priv & MSM_INTERNAL_MEM) ? true : false);
741 ioc = ((request->udc_priv & MSM_ETD_IOC) ? true : false);
742
743 ret = dbm_ep_config(mdwc->dbm, dep->number, bam_pipe, producer,
744 disable_wb, internal_mem, ioc);
745 if (ret < 0) {
746 dev_err(mdwc->dev,
747 "error %d after calling dbm_ep_config\n", ret);
748 return ret;
749 }
750
751 dev_vdbg(dwc->dev, "%s: queing request %p to ep %s length %d\n",
752 __func__, request, ep->name, request->length);
753 size = dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTSIZ(0));
754 dbm_event_buffer_config(mdwc->dbm,
755 dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRLO(0)),
756 dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRHI(0)),
757 DWC3_GEVNTSIZ_SIZE(size));
758
759 /*
760 * We must obtain the lock of the dwc3 core driver,
761 * including disabling interrupts, so we will be sure
762 * that we are the only ones that configure the HW device
763 * core and ensure that we queuing the request will finish
764 * as soon as possible so we will release back the lock.
765 */
766 spin_lock_irqsave(&dwc->lock, flags);
767 if (!dep->endpoint.desc) {
768 dev_err(mdwc->dev,
769 "%s: trying to queue request %p to disabled ep %s\n",
770 __func__, request, ep->name);
771 ret = -EPERM;
772 goto err;
773 }
774
775 if (dep->number == 0 || dep->number == 1) {
776 dev_err(mdwc->dev,
777 "%s: trying to queue dbm request %p to control ep %s\n",
778 __func__, request, ep->name);
779 ret = -EPERM;
780 goto err;
781 }
782
783
Mayank Rana83ad5822016-08-09 14:17:22 -0700784 if (dep->trb_dequeue != dep->trb_enqueue ||
785 !list_empty(&dep->pending_list)
786 || !list_empty(&dep->started_list)) {
Mayank Rana511f3b22016-08-02 12:00:11 -0700787 dev_err(mdwc->dev,
788 "%s: trying to queue dbm request %p tp ep %s\n",
789 __func__, request, ep->name);
790 ret = -EPERM;
791 goto err;
792 } else {
Mayank Rana83ad5822016-08-09 14:17:22 -0700793 dep->trb_dequeue = 0;
794 dep->trb_enqueue = 0;
Mayank Rana511f3b22016-08-02 12:00:11 -0700795 }
796
797 ret = __dwc3_msm_ep_queue(dep, req);
798 if (ret < 0) {
799 dev_err(mdwc->dev,
800 "error %d after calling __dwc3_msm_ep_queue\n", ret);
801 goto err;
802 }
803
804 spin_unlock_irqrestore(&dwc->lock, flags);
805 superspeed = dwc3_msm_is_dev_superspeed(mdwc);
806 dbm_set_speed(mdwc->dbm, (u8)superspeed);
807
808 return 0;
809
810err:
811 spin_unlock_irqrestore(&dwc->lock, flags);
812 kfree(req_complete);
813 return ret;
814}
815
816/*
817* Returns XferRscIndex for the EP. This is stored at StartXfer GSI EP OP
818*
819* @usb_ep - pointer to usb_ep instance.
820*
821* @return int - XferRscIndex
822*/
823static inline int gsi_get_xfer_index(struct usb_ep *ep)
824{
825 struct dwc3_ep *dep = to_dwc3_ep(ep);
826
827 return dep->resource_index;
828}
829
830/*
831* Fills up the GSI channel information needed in call to IPA driver
832* for GSI channel creation.
833*
834* @usb_ep - pointer to usb_ep instance.
835* @ch_info - output parameter with requested channel info
836*/
837static void gsi_get_channel_info(struct usb_ep *ep,
838 struct gsi_channel_info *ch_info)
839{
840 struct dwc3_ep *dep = to_dwc3_ep(ep);
841 int last_trb_index = 0;
842 struct dwc3 *dwc = dep->dwc;
843 struct usb_gsi_request *request = ch_info->ch_req;
844
845 /* Provide physical USB addresses for DEPCMD and GEVENTCNT registers */
846 ch_info->depcmd_low_addr = (u32)(dwc->reg_phys +
Mayank Ranaac776d12017-04-18 16:56:13 -0700847 DWC3_DEP_BASE(dep->number) + DWC3_DEPCMD);
848
Mayank Rana511f3b22016-08-02 12:00:11 -0700849 ch_info->depcmd_hi_addr = 0;
850
851 ch_info->xfer_ring_base_addr = dwc3_trb_dma_offset(dep,
852 &dep->trb_pool[0]);
853 /* Convert to multipled of 1KB */
854 ch_info->const_buffer_size = request->buf_len/1024;
855
856 /* IN direction */
857 if (dep->direction) {
858 /*
859 * Multiply by size of each TRB for xfer_ring_len in bytes.
860 * 2n + 2 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
861 * extra Xfer TRB followed by n ZLP TRBs + 1 LINK TRB.
862 */
863 ch_info->xfer_ring_len = (2 * request->num_bufs + 2) * 0x10;
864 last_trb_index = 2 * request->num_bufs + 2;
865 } else { /* OUT direction */
866 /*
867 * Multiply by size of each TRB for xfer_ring_len in bytes.
868 * n + 1 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
869 * LINK TRB.
870 */
Mayank Rana64d136b2016-11-01 21:01:34 -0700871 ch_info->xfer_ring_len = (request->num_bufs + 2) * 0x10;
872 last_trb_index = request->num_bufs + 2;
Mayank Rana511f3b22016-08-02 12:00:11 -0700873 }
874
875 /* Store last 16 bits of LINK TRB address as per GSI hw requirement */
876 ch_info->last_trb_addr = (dwc3_trb_dma_offset(dep,
877 &dep->trb_pool[last_trb_index - 1]) & 0x0000FFFF);
878 ch_info->gevntcount_low_addr = (u32)(dwc->reg_phys +
879 DWC3_GEVNTCOUNT(ep->ep_intr_num));
880 ch_info->gevntcount_hi_addr = 0;
881
882 dev_dbg(dwc->dev,
883 "depcmd_laddr=%x last_trb_addr=%x gevtcnt_laddr=%x gevtcnt_haddr=%x",
884 ch_info->depcmd_low_addr, ch_info->last_trb_addr,
885 ch_info->gevntcount_low_addr, ch_info->gevntcount_hi_addr);
886}
887
888/*
889* Perform StartXfer on GSI EP. Stores XferRscIndex.
890*
891* @usb_ep - pointer to usb_ep instance.
892*
893* @return int - 0 on success
894*/
895static int gsi_startxfer_for_ep(struct usb_ep *ep)
896{
897 int ret;
898 struct dwc3_gadget_ep_cmd_params params;
899 u32 cmd;
900 struct dwc3_ep *dep = to_dwc3_ep(ep);
901 struct dwc3 *dwc = dep->dwc;
902
903 memset(&params, 0, sizeof(params));
904 params.param0 = GSI_TRB_ADDR_BIT_53_MASK | GSI_TRB_ADDR_BIT_55_MASK;
905 params.param0 |= (ep->ep_intr_num << 16);
906 params.param1 = lower_32_bits(dwc3_trb_dma_offset(dep,
907 &dep->trb_pool[0]));
908 cmd = DWC3_DEPCMD_STARTTRANSFER;
909 cmd |= DWC3_DEPCMD_PARAM(0);
Mayank Rana83ad5822016-08-09 14:17:22 -0700910 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -0700911
912 if (ret < 0)
913 dev_dbg(dwc->dev, "Fail StrtXfr on GSI EP#%d\n", dep->number);
Mayank Rana83ad5822016-08-09 14:17:22 -0700914 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700915 dev_dbg(dwc->dev, "XferRsc = %x", dep->resource_index);
916 return ret;
917}
918
919/*
920* Store Ring Base and Doorbell Address for GSI EP
921* for GSI channel creation.
922*
923* @usb_ep - pointer to usb_ep instance.
924* @dbl_addr - Doorbell address obtained from IPA driver
925*/
926static void gsi_store_ringbase_dbl_info(struct usb_ep *ep, u32 dbl_addr)
927{
928 struct dwc3_ep *dep = to_dwc3_ep(ep);
929 struct dwc3 *dwc = dep->dwc;
930 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
931 int n = ep->ep_intr_num - 1;
932
933 dwc3_msm_write_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n),
934 dwc3_trb_dma_offset(dep, &dep->trb_pool[0]));
935 dwc3_msm_write_reg(mdwc->base, GSI_DBL_ADDR_L(n), dbl_addr);
936
937 dev_dbg(mdwc->dev, "Ring Base Addr %d = %x", n,
938 dwc3_msm_read_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n)));
939 dev_dbg(mdwc->dev, "GSI DB Addr %d = %x", n,
940 dwc3_msm_read_reg(mdwc->base, GSI_DBL_ADDR_L(n)));
941}
942
943/*
Mayank Rana64d136b2016-11-01 21:01:34 -0700944* Rings Doorbell for GSI Channel
Mayank Rana511f3b22016-08-02 12:00:11 -0700945*
946* @usb_ep - pointer to usb_ep instance.
947* @request - pointer to GSI request. This is used to pass in the
948* address of the GSI doorbell obtained from IPA driver
949*/
Mayank Rana64d136b2016-11-01 21:01:34 -0700950static void gsi_ring_db(struct usb_ep *ep, struct usb_gsi_request *request)
Mayank Rana511f3b22016-08-02 12:00:11 -0700951{
952 void __iomem *gsi_dbl_address_lsb;
953 void __iomem *gsi_dbl_address_msb;
954 dma_addr_t offset;
955 u64 dbl_addr = *((u64 *)request->buf_base_addr);
956 u32 dbl_lo_addr = (dbl_addr & 0xFFFFFFFF);
957 u32 dbl_hi_addr = (dbl_addr >> 32);
Mayank Rana511f3b22016-08-02 12:00:11 -0700958 struct dwc3_ep *dep = to_dwc3_ep(ep);
959 struct dwc3 *dwc = dep->dwc;
960 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
Mayank Rana64d136b2016-11-01 21:01:34 -0700961 int num_trbs = (dep->direction) ? (2 * (request->num_bufs) + 2)
962 : (request->num_bufs + 2);
Mayank Rana511f3b22016-08-02 12:00:11 -0700963
964 gsi_dbl_address_lsb = devm_ioremap_nocache(mdwc->dev,
965 dbl_lo_addr, sizeof(u32));
966 if (!gsi_dbl_address_lsb)
967 dev_dbg(mdwc->dev, "Failed to get GSI DBL address LSB\n");
968
969 gsi_dbl_address_msb = devm_ioremap_nocache(mdwc->dev,
970 dbl_hi_addr, sizeof(u32));
971 if (!gsi_dbl_address_msb)
972 dev_dbg(mdwc->dev, "Failed to get GSI DBL address MSB\n");
973
974 offset = dwc3_trb_dma_offset(dep, &dep->trb_pool[num_trbs-1]);
Mayank Rana64d136b2016-11-01 21:01:34 -0700975 dev_dbg(mdwc->dev, "Writing link TRB addr: %pa to %p (%x) for ep:%s\n",
976 &offset, gsi_dbl_address_lsb, dbl_lo_addr, ep->name);
Mayank Rana511f3b22016-08-02 12:00:11 -0700977
978 writel_relaxed(offset, gsi_dbl_address_lsb);
979 writel_relaxed(0, gsi_dbl_address_msb);
980}
981
982/*
983* Sets HWO bit for TRBs and performs UpdateXfer for OUT EP.
984*
985* @usb_ep - pointer to usb_ep instance.
986* @request - pointer to GSI request. Used to determine num of TRBs for OUT EP.
987*
988* @return int - 0 on success
989*/
990static int gsi_updatexfer_for_ep(struct usb_ep *ep,
991 struct usb_gsi_request *request)
992{
993 int i;
994 int ret;
995 u32 cmd;
996 int num_trbs = request->num_bufs + 1;
997 struct dwc3_trb *trb;
998 struct dwc3_gadget_ep_cmd_params params;
999 struct dwc3_ep *dep = to_dwc3_ep(ep);
1000 struct dwc3 *dwc = dep->dwc;
1001
1002 for (i = 0; i < num_trbs - 1; i++) {
1003 trb = &dep->trb_pool[i];
1004 trb->ctrl |= DWC3_TRB_CTRL_HWO;
1005 }
1006
1007 memset(&params, 0, sizeof(params));
1008 cmd = DWC3_DEPCMD_UPDATETRANSFER;
1009 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
Mayank Rana83ad5822016-08-09 14:17:22 -07001010 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -07001011 dep->flags |= DWC3_EP_BUSY;
1012 if (ret < 0)
1013 dev_dbg(dwc->dev, "UpdateXfr fail on GSI EP#%d\n", dep->number);
1014 return ret;
1015}
1016
1017/*
1018* Perform EndXfer on particular GSI EP.
1019*
1020* @usb_ep - pointer to usb_ep instance.
1021*/
1022static void gsi_endxfer_for_ep(struct usb_ep *ep)
1023{
1024 struct dwc3_ep *dep = to_dwc3_ep(ep);
1025 struct dwc3 *dwc = dep->dwc;
1026
1027 dwc3_stop_active_transfer(dwc, dep->number, true);
1028}
1029
1030/*
1031* Allocates and configures TRBs for GSI EPs.
1032*
1033* @usb_ep - pointer to usb_ep instance.
1034* @request - pointer to GSI request.
1035*
1036* @return int - 0 on success
1037*/
1038static int gsi_prepare_trbs(struct usb_ep *ep, struct usb_gsi_request *req)
1039{
1040 int i = 0;
1041 dma_addr_t buffer_addr = req->dma;
1042 struct dwc3_ep *dep = to_dwc3_ep(ep);
1043 struct dwc3 *dwc = dep->dwc;
1044 struct dwc3_trb *trb;
1045 int num_trbs = (dep->direction) ? (2 * (req->num_bufs) + 2)
Mayank Rana64d136b2016-11-01 21:01:34 -07001046 : (req->num_bufs + 2);
Mayank Rana511f3b22016-08-02 12:00:11 -07001047
Jack Phambbe27962017-03-23 18:42:26 -07001048 dep->trb_dma_pool = dma_pool_create(ep->name, dwc->sysdev,
Mayank Rana511f3b22016-08-02 12:00:11 -07001049 num_trbs * sizeof(struct dwc3_trb),
1050 num_trbs * sizeof(struct dwc3_trb), 0);
1051 if (!dep->trb_dma_pool) {
1052 dev_err(dep->dwc->dev, "failed to alloc trb dma pool for %s\n",
1053 dep->name);
1054 return -ENOMEM;
1055 }
1056
1057 dep->num_trbs = num_trbs;
1058
1059 dep->trb_pool = dma_pool_alloc(dep->trb_dma_pool,
1060 GFP_KERNEL, &dep->trb_pool_dma);
1061 if (!dep->trb_pool) {
1062 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
1063 dep->name);
1064 return -ENOMEM;
1065 }
1066
1067 /* IN direction */
1068 if (dep->direction) {
1069 for (i = 0; i < num_trbs ; i++) {
1070 trb = &dep->trb_pool[i];
1071 memset(trb, 0, sizeof(*trb));
1072 /* Set up first n+1 TRBs for ZLPs */
1073 if (i < (req->num_bufs + 1)) {
1074 trb->bpl = 0;
1075 trb->bph = 0;
1076 trb->size = 0;
1077 trb->ctrl = DWC3_TRBCTL_NORMAL
1078 | DWC3_TRB_CTRL_IOC;
1079 continue;
1080 }
1081
1082 /* Setup n TRBs pointing to valid buffers */
1083 trb->bpl = lower_32_bits(buffer_addr);
1084 trb->bph = 0;
1085 trb->size = 0;
1086 trb->ctrl = DWC3_TRBCTL_NORMAL
1087 | DWC3_TRB_CTRL_IOC;
1088 buffer_addr += req->buf_len;
1089
1090 /* Set up the Link TRB at the end */
1091 if (i == (num_trbs - 1)) {
1092 trb->bpl = dwc3_trb_dma_offset(dep,
1093 &dep->trb_pool[0]);
1094 trb->bph = (1 << 23) | (1 << 21)
1095 | (ep->ep_intr_num << 16);
1096 trb->size = 0;
1097 trb->ctrl = DWC3_TRBCTL_LINK_TRB
1098 | DWC3_TRB_CTRL_HWO;
1099 }
1100 }
1101 } else { /* OUT direction */
1102
1103 for (i = 0; i < num_trbs ; i++) {
1104
1105 trb = &dep->trb_pool[i];
1106 memset(trb, 0, sizeof(*trb));
Mayank Rana64d136b2016-11-01 21:01:34 -07001107 /* Setup LINK TRB to start with TRB ring */
1108 if (i == 0) {
Mayank Rana511f3b22016-08-02 12:00:11 -07001109 trb->bpl = dwc3_trb_dma_offset(dep,
Mayank Rana64d136b2016-11-01 21:01:34 -07001110 &dep->trb_pool[1]);
1111 trb->ctrl = DWC3_TRBCTL_LINK_TRB;
1112 } else if (i == (num_trbs - 1)) {
1113 /* Set up the Link TRB at the end */
1114 trb->bpl = dwc3_trb_dma_offset(dep,
1115 &dep->trb_pool[0]);
Mayank Rana511f3b22016-08-02 12:00:11 -07001116 trb->bph = (1 << 23) | (1 << 21)
1117 | (ep->ep_intr_num << 16);
Mayank Rana511f3b22016-08-02 12:00:11 -07001118 trb->ctrl = DWC3_TRBCTL_LINK_TRB
1119 | DWC3_TRB_CTRL_HWO;
Mayank Rana64d136b2016-11-01 21:01:34 -07001120 } else {
1121 trb->bpl = lower_32_bits(buffer_addr);
1122 trb->size = req->buf_len;
1123 buffer_addr += req->buf_len;
1124 trb->ctrl = DWC3_TRBCTL_NORMAL
1125 | DWC3_TRB_CTRL_IOC
1126 | DWC3_TRB_CTRL_CSP
1127 | DWC3_TRB_CTRL_ISP_IMI;
Mayank Rana511f3b22016-08-02 12:00:11 -07001128 }
1129 }
1130 }
Mayank Rana64d136b2016-11-01 21:01:34 -07001131
1132 pr_debug("%s: Initialized TRB Ring for %s\n", __func__, dep->name);
1133 trb = &dep->trb_pool[0];
1134 if (trb) {
1135 for (i = 0; i < num_trbs; i++) {
1136 pr_debug("TRB(%d): ADDRESS:%lx bpl:%x bph:%x size:%x ctrl:%x\n",
1137 i, (unsigned long)dwc3_trb_dma_offset(dep,
1138 &dep->trb_pool[i]), trb->bpl, trb->bph,
1139 trb->size, trb->ctrl);
1140 trb++;
1141 }
1142 }
1143
Mayank Rana511f3b22016-08-02 12:00:11 -07001144 return 0;
1145}
1146
1147/*
1148* Frees TRBs for GSI EPs.
1149*
1150* @usb_ep - pointer to usb_ep instance.
1151*
1152*/
1153static void gsi_free_trbs(struct usb_ep *ep)
1154{
1155 struct dwc3_ep *dep = to_dwc3_ep(ep);
1156
1157 if (dep->endpoint.ep_type == EP_TYPE_NORMAL)
1158 return;
1159
1160 /* Free TRBs and TRB pool for EP */
1161 if (dep->trb_dma_pool) {
1162 dma_pool_free(dep->trb_dma_pool, dep->trb_pool,
1163 dep->trb_pool_dma);
1164 dma_pool_destroy(dep->trb_dma_pool);
1165 dep->trb_pool = NULL;
1166 dep->trb_pool_dma = 0;
1167 dep->trb_dma_pool = NULL;
1168 }
1169}
1170/*
1171* Configures GSI EPs. For GSI EPs we need to set interrupter numbers.
1172*
1173* @usb_ep - pointer to usb_ep instance.
1174* @request - pointer to GSI request.
1175*/
1176static void gsi_configure_ep(struct usb_ep *ep, struct usb_gsi_request *request)
1177{
1178 struct dwc3_ep *dep = to_dwc3_ep(ep);
1179 struct dwc3 *dwc = dep->dwc;
1180 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1181 struct dwc3_gadget_ep_cmd_params params;
1182 const struct usb_endpoint_descriptor *desc = ep->desc;
1183 const struct usb_ss_ep_comp_descriptor *comp_desc = ep->comp_desc;
Mayank Ranaac1200c2017-04-25 13:48:46 -07001184 u32 reg;
1185 int ret;
Mayank Rana511f3b22016-08-02 12:00:11 -07001186
1187 memset(&params, 0x00, sizeof(params));
1188
1189 /* Configure GSI EP */
1190 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
1191 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
1192
1193 /* Burst size is only needed in SuperSpeed mode */
1194 if (dwc->gadget.speed == USB_SPEED_SUPER) {
1195 u32 burst = dep->endpoint.maxburst - 1;
1196
1197 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
1198 }
1199
1200 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
1201 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
1202 | DWC3_DEPCFG_STREAM_EVENT_EN;
1203 dep->stream_capable = true;
1204 }
1205
1206 /* Set EP number */
1207 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
1208
1209 /* Set interrupter number for GSI endpoints */
1210 params.param1 |= DWC3_DEPCFG_INT_NUM(ep->ep_intr_num);
1211
1212 /* Enable XferInProgress and XferComplete Interrupts */
1213 params.param1 |= DWC3_DEPCFG_XFER_COMPLETE_EN;
1214 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
1215 params.param1 |= DWC3_DEPCFG_FIFO_ERROR_EN;
1216 /*
1217 * We must use the lower 16 TX FIFOs even though
1218 * HW might have more
1219 */
1220 /* Remove FIFO Number for GSI EP*/
1221 if (dep->direction)
1222 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
1223
1224 params.param0 |= DWC3_DEPCFG_ACTION_INIT;
1225
1226 dev_dbg(mdwc->dev, "Set EP config to params = %x %x %x, for %s\n",
1227 params.param0, params.param1, params.param2, dep->name);
1228
Mayank Rana83ad5822016-08-09 14:17:22 -07001229 dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -07001230
1231 /* Set XferRsc Index for GSI EP */
1232 if (!(dep->flags & DWC3_EP_ENABLED)) {
Mayank Ranaac1200c2017-04-25 13:48:46 -07001233 ret = dwc3_gadget_resize_tx_fifos(dwc, dep);
1234 if (ret)
1235 return;
1236
Mayank Rana511f3b22016-08-02 12:00:11 -07001237 memset(&params, 0x00, sizeof(params));
1238 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
Mayank Rana83ad5822016-08-09 14:17:22 -07001239 dwc3_send_gadget_ep_cmd(dep,
Mayank Rana511f3b22016-08-02 12:00:11 -07001240 DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
1241
1242 dep->endpoint.desc = desc;
1243 dep->comp_desc = comp_desc;
1244 dep->type = usb_endpoint_type(desc);
1245 dep->flags |= DWC3_EP_ENABLED;
1246 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
1247 reg |= DWC3_DALEPENA_EP(dep->number);
1248 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
1249 }
1250
1251}
1252
1253/*
1254* Enables USB wrapper for GSI
1255*
1256* @usb_ep - pointer to usb_ep instance.
1257*/
1258static void gsi_enable(struct usb_ep *ep)
1259{
1260 struct dwc3_ep *dep = to_dwc3_ep(ep);
1261 struct dwc3 *dwc = dep->dwc;
1262 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1263
1264 dwc3_msm_write_reg_field(mdwc->base,
1265 GSI_GENERAL_CFG_REG, GSI_CLK_EN_MASK, 1);
1266 dwc3_msm_write_reg_field(mdwc->base,
1267 GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 1);
1268 dwc3_msm_write_reg_field(mdwc->base,
1269 GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 0);
1270 dev_dbg(mdwc->dev, "%s: Enable GSI\n", __func__);
1271 dwc3_msm_write_reg_field(mdwc->base,
1272 GSI_GENERAL_CFG_REG, GSI_EN_MASK, 1);
1273}
1274
1275/*
1276* Block or allow doorbell towards GSI
1277*
1278* @usb_ep - pointer to usb_ep instance.
1279* @request - pointer to GSI request. In this case num_bufs is used as a bool
1280* to set or clear the doorbell bit
1281*/
1282static void gsi_set_clear_dbell(struct usb_ep *ep,
1283 bool block_db)
1284{
1285
1286 struct dwc3_ep *dep = to_dwc3_ep(ep);
1287 struct dwc3 *dwc = dep->dwc;
1288 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1289
1290 dwc3_msm_write_reg_field(mdwc->base,
1291 GSI_GENERAL_CFG_REG, BLOCK_GSI_WR_GO_MASK, block_db);
1292}
1293
1294/*
1295* Performs necessary checks before stopping GSI channels
1296*
1297* @usb_ep - pointer to usb_ep instance to access DWC3 regs
1298*/
1299static bool gsi_check_ready_to_suspend(struct usb_ep *ep, bool f_suspend)
1300{
1301 u32 timeout = 1500;
1302 u32 reg = 0;
1303 struct dwc3_ep *dep = to_dwc3_ep(ep);
1304 struct dwc3 *dwc = dep->dwc;
1305 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1306
1307 while (dwc3_msm_read_reg_field(mdwc->base,
1308 GSI_IF_STS, GSI_WR_CTRL_STATE_MASK)) {
1309 if (!timeout--) {
1310 dev_err(mdwc->dev,
1311 "Unable to suspend GSI ch. WR_CTRL_STATE != 0\n");
1312 return false;
1313 }
1314 }
1315 /* Check for U3 only if we are not handling Function Suspend */
1316 if (!f_suspend) {
1317 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1318 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U3) {
1319 dev_err(mdwc->dev, "Unable to suspend GSI ch\n");
1320 return false;
1321 }
1322 }
1323
1324 return true;
1325}
1326
1327
1328/**
1329* Performs GSI operations or GSI EP related operations.
1330*
1331* @usb_ep - pointer to usb_ep instance.
1332* @op_data - pointer to opcode related data.
1333* @op - GSI related or GSI EP related op code.
1334*
1335* @return int - 0 on success, negative on error.
1336* Also returns XferRscIdx for GSI_EP_OP_GET_XFER_IDX.
1337*/
1338static int dwc3_msm_gsi_ep_op(struct usb_ep *ep,
1339 void *op_data, enum gsi_ep_op op)
1340{
1341 u32 ret = 0;
1342 struct dwc3_ep *dep = to_dwc3_ep(ep);
1343 struct dwc3 *dwc = dep->dwc;
1344 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1345 struct usb_gsi_request *request;
1346 struct gsi_channel_info *ch_info;
1347 bool block_db, f_suspend;
Mayank Rana8432c362016-09-30 18:41:17 -07001348 unsigned long flags;
Mayank Rana511f3b22016-08-02 12:00:11 -07001349
1350 switch (op) {
1351 case GSI_EP_OP_PREPARE_TRBS:
1352 request = (struct usb_gsi_request *)op_data;
1353 dev_dbg(mdwc->dev, "EP_OP_PREPARE_TRBS for %s\n", ep->name);
1354 ret = gsi_prepare_trbs(ep, request);
1355 break;
1356 case GSI_EP_OP_FREE_TRBS:
1357 dev_dbg(mdwc->dev, "EP_OP_FREE_TRBS for %s\n", ep->name);
1358 gsi_free_trbs(ep);
1359 break;
1360 case GSI_EP_OP_CONFIG:
1361 request = (struct usb_gsi_request *)op_data;
1362 dev_dbg(mdwc->dev, "EP_OP_CONFIG for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001363 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001364 gsi_configure_ep(ep, request);
Mayank Rana8432c362016-09-30 18:41:17 -07001365 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001366 break;
1367 case GSI_EP_OP_STARTXFER:
1368 dev_dbg(mdwc->dev, "EP_OP_STARTXFER for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001369 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001370 ret = gsi_startxfer_for_ep(ep);
Mayank Rana8432c362016-09-30 18:41:17 -07001371 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001372 break;
1373 case GSI_EP_OP_GET_XFER_IDX:
1374 dev_dbg(mdwc->dev, "EP_OP_GET_XFER_IDX for %s\n", ep->name);
1375 ret = gsi_get_xfer_index(ep);
1376 break;
1377 case GSI_EP_OP_STORE_DBL_INFO:
1378 dev_dbg(mdwc->dev, "EP_OP_STORE_DBL_INFO\n");
1379 gsi_store_ringbase_dbl_info(ep, *((u32 *)op_data));
1380 break;
1381 case GSI_EP_OP_ENABLE_GSI:
1382 dev_dbg(mdwc->dev, "EP_OP_ENABLE_GSI\n");
1383 gsi_enable(ep);
1384 break;
1385 case GSI_EP_OP_GET_CH_INFO:
1386 ch_info = (struct gsi_channel_info *)op_data;
1387 gsi_get_channel_info(ep, ch_info);
1388 break;
Mayank Rana64d136b2016-11-01 21:01:34 -07001389 case GSI_EP_OP_RING_DB:
Mayank Rana511f3b22016-08-02 12:00:11 -07001390 request = (struct usb_gsi_request *)op_data;
Mayank Rana64d136b2016-11-01 21:01:34 -07001391 dbg_print(0xFF, "RING_DB", 0, ep->name);
1392 gsi_ring_db(ep, request);
Mayank Rana511f3b22016-08-02 12:00:11 -07001393 break;
1394 case GSI_EP_OP_UPDATEXFER:
1395 request = (struct usb_gsi_request *)op_data;
1396 dev_dbg(mdwc->dev, "EP_OP_UPDATEXFER\n");
Mayank Rana8432c362016-09-30 18:41:17 -07001397 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001398 ret = gsi_updatexfer_for_ep(ep, request);
Mayank Rana8432c362016-09-30 18:41:17 -07001399 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001400 break;
1401 case GSI_EP_OP_ENDXFER:
1402 request = (struct usb_gsi_request *)op_data;
1403 dev_dbg(mdwc->dev, "EP_OP_ENDXFER for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001404 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001405 gsi_endxfer_for_ep(ep);
Mayank Rana8432c362016-09-30 18:41:17 -07001406 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001407 break;
1408 case GSI_EP_OP_SET_CLR_BLOCK_DBL:
1409 block_db = *((bool *)op_data);
1410 dev_dbg(mdwc->dev, "EP_OP_SET_CLR_BLOCK_DBL %d\n",
1411 block_db);
1412 gsi_set_clear_dbell(ep, block_db);
1413 break;
1414 case GSI_EP_OP_CHECK_FOR_SUSPEND:
1415 dev_dbg(mdwc->dev, "EP_OP_CHECK_FOR_SUSPEND\n");
1416 f_suspend = *((bool *)op_data);
1417 ret = gsi_check_ready_to_suspend(ep, f_suspend);
1418 break;
1419 case GSI_EP_OP_DISABLE:
1420 dev_dbg(mdwc->dev, "EP_OP_DISABLE\n");
1421 ret = ep->ops->disable(ep);
1422 break;
1423 default:
1424 dev_err(mdwc->dev, "%s: Invalid opcode GSI EP\n", __func__);
1425 }
1426
1427 return ret;
1428}
1429
1430/**
1431 * Configure MSM endpoint.
1432 * This function do specific configurations
1433 * to an endpoint which need specific implementaion
1434 * in the MSM architecture.
1435 *
1436 * This function should be called by usb function/class
1437 * layer which need a support from the specific MSM HW
1438 * which wrap the USB3 core. (like GSI or DBM specific endpoints)
1439 *
1440 * @ep - a pointer to some usb_ep instance
1441 *
1442 * @return int - 0 on success, negetive on error.
1443 */
1444int msm_ep_config(struct usb_ep *ep)
1445{
1446 struct dwc3_ep *dep = to_dwc3_ep(ep);
1447 struct dwc3 *dwc = dep->dwc;
1448 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1449 struct usb_ep_ops *new_ep_ops;
1450
1451
1452 /* Save original ep ops for future restore*/
1453 if (mdwc->original_ep_ops[dep->number]) {
1454 dev_err(mdwc->dev,
1455 "ep [%s,%d] already configured as msm endpoint\n",
1456 ep->name, dep->number);
1457 return -EPERM;
1458 }
1459 mdwc->original_ep_ops[dep->number] = ep->ops;
1460
1461 /* Set new usb ops as we like */
1462 new_ep_ops = kzalloc(sizeof(struct usb_ep_ops), GFP_ATOMIC);
1463 if (!new_ep_ops)
1464 return -ENOMEM;
1465
1466 (*new_ep_ops) = (*ep->ops);
1467 new_ep_ops->queue = dwc3_msm_ep_queue;
1468 new_ep_ops->gsi_ep_op = dwc3_msm_gsi_ep_op;
1469 ep->ops = new_ep_ops;
1470
1471 /*
1472 * Do HERE more usb endpoint configurations
1473 * which are specific to MSM.
1474 */
1475
1476 return 0;
1477}
1478EXPORT_SYMBOL(msm_ep_config);
1479
1480/**
1481 * Un-configure MSM endpoint.
1482 * Tear down configurations done in the
1483 * dwc3_msm_ep_config function.
1484 *
1485 * @ep - a pointer to some usb_ep instance
1486 *
1487 * @return int - 0 on success, negative on error.
1488 */
1489int msm_ep_unconfig(struct usb_ep *ep)
1490{
1491 struct dwc3_ep *dep = to_dwc3_ep(ep);
1492 struct dwc3 *dwc = dep->dwc;
1493 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1494 struct usb_ep_ops *old_ep_ops;
1495
1496 /* Restore original ep ops */
1497 if (!mdwc->original_ep_ops[dep->number]) {
1498 dev_err(mdwc->dev,
1499 "ep [%s,%d] was not configured as msm endpoint\n",
1500 ep->name, dep->number);
1501 return -EINVAL;
1502 }
1503 old_ep_ops = (struct usb_ep_ops *)ep->ops;
1504 ep->ops = mdwc->original_ep_ops[dep->number];
1505 mdwc->original_ep_ops[dep->number] = NULL;
1506 kfree(old_ep_ops);
1507
1508 /*
1509 * Do HERE more usb endpoint un-configurations
1510 * which are specific to MSM.
1511 */
1512
1513 return 0;
1514}
1515EXPORT_SYMBOL(msm_ep_unconfig);
1516#endif /* (CONFIG_USB_DWC3_GADGET) || (CONFIG_USB_DWC3_DUAL_ROLE) */
1517
1518static void dwc3_resume_work(struct work_struct *w);
1519
1520static void dwc3_restart_usb_work(struct work_struct *w)
1521{
1522 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
1523 restart_usb_work);
1524 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1525 unsigned int timeout = 50;
1526
1527 dev_dbg(mdwc->dev, "%s\n", __func__);
1528
1529 if (atomic_read(&dwc->in_lpm) || !dwc->is_drd) {
1530 dev_dbg(mdwc->dev, "%s failed!!!\n", __func__);
1531 return;
1532 }
1533
1534 /* guard against concurrent VBUS handling */
1535 mdwc->in_restart = true;
1536
1537 if (!mdwc->vbus_active) {
1538 dev_dbg(mdwc->dev, "%s bailing out in disconnect\n", __func__);
1539 dwc->err_evt_seen = false;
1540 mdwc->in_restart = false;
1541 return;
1542 }
1543
Mayank Rana08e41922017-03-02 15:25:48 -08001544 dbg_event(0xFF, "RestartUSB", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07001545 /* Reset active USB connection */
1546 dwc3_resume_work(&mdwc->resume_work);
1547
1548 /* Make sure disconnect is processed before sending connect */
1549 while (--timeout && !pm_runtime_suspended(mdwc->dev))
1550 msleep(20);
1551
1552 if (!timeout) {
1553 dev_dbg(mdwc->dev,
1554 "Not in LPM after disconnect, forcing suspend...\n");
Mayank Rana08e41922017-03-02 15:25:48 -08001555 dbg_event(0xFF, "ReStart:RT SUSP",
1556 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07001557 pm_runtime_suspend(mdwc->dev);
1558 }
1559
Vijayavardhan Vennapusa5e5680e2016-11-25 11:25:35 +05301560 mdwc->in_restart = false;
Mayank Rana511f3b22016-08-02 12:00:11 -07001561 /* Force reconnect only if cable is still connected */
Vijayavardhan Vennapusa5e5680e2016-11-25 11:25:35 +05301562 if (mdwc->vbus_active)
Mayank Rana511f3b22016-08-02 12:00:11 -07001563 dwc3_resume_work(&mdwc->resume_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07001564
1565 dwc->err_evt_seen = false;
1566 flush_delayed_work(&mdwc->sm_work);
1567}
1568
Manu Gautam976fdfc2016-08-18 09:27:35 +05301569static int msm_dwc3_usbdev_notify(struct notifier_block *self,
1570 unsigned long action, void *priv)
1571{
1572 struct dwc3_msm *mdwc = container_of(self, struct dwc3_msm, usbdev_nb);
1573 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1574 struct usb_bus *bus = priv;
1575
1576 /* Interested only in recovery when HC dies */
1577 if (action != USB_BUS_DIED)
1578 return 0;
1579
1580 dev_dbg(mdwc->dev, "%s initiate recovery from hc_died\n", __func__);
1581 /* Recovery already under process */
1582 if (mdwc->hc_died)
1583 return 0;
1584
1585 if (bus->controller != &dwc->xhci->dev) {
1586 dev_dbg(mdwc->dev, "%s event for diff HCD\n", __func__);
1587 return 0;
1588 }
1589
1590 mdwc->hc_died = true;
1591 schedule_delayed_work(&mdwc->sm_work, 0);
1592 return 0;
1593}
1594
1595
Mayank Rana511f3b22016-08-02 12:00:11 -07001596/*
1597 * Check whether the DWC3 requires resetting the ep
1598 * after going to Low Power Mode (lpm)
1599 */
1600bool msm_dwc3_reset_ep_after_lpm(struct usb_gadget *gadget)
1601{
1602 struct dwc3 *dwc = container_of(gadget, struct dwc3, gadget);
1603 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1604
1605 return dbm_reset_ep_after_lpm(mdwc->dbm);
1606}
1607EXPORT_SYMBOL(msm_dwc3_reset_ep_after_lpm);
1608
1609/*
1610 * Config Global Distributed Switch Controller (GDSC)
1611 * to support controller power collapse
1612 */
1613static int dwc3_msm_config_gdsc(struct dwc3_msm *mdwc, int on)
1614{
1615 int ret;
1616
1617 if (IS_ERR_OR_NULL(mdwc->dwc3_gdsc))
1618 return -EPERM;
1619
1620 if (on) {
1621 ret = regulator_enable(mdwc->dwc3_gdsc);
1622 if (ret) {
1623 dev_err(mdwc->dev, "unable to enable usb3 gdsc\n");
1624 return ret;
1625 }
1626 } else {
1627 ret = regulator_disable(mdwc->dwc3_gdsc);
1628 if (ret) {
1629 dev_err(mdwc->dev, "unable to disable usb3 gdsc\n");
1630 return ret;
1631 }
1632 }
1633
1634 return ret;
1635}
1636
1637static int dwc3_msm_link_clk_reset(struct dwc3_msm *mdwc, bool assert)
1638{
1639 int ret = 0;
1640
1641 if (assert) {
Mayank Ranad339abe2017-05-31 09:19:49 -07001642 disable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
Mayank Rana511f3b22016-08-02 12:00:11 -07001643 /* Using asynchronous block reset to the hardware */
1644 dev_dbg(mdwc->dev, "block_reset ASSERT\n");
1645 clk_disable_unprepare(mdwc->utmi_clk);
1646 clk_disable_unprepare(mdwc->sleep_clk);
1647 clk_disable_unprepare(mdwc->core_clk);
1648 clk_disable_unprepare(mdwc->iface_clk);
Amit Nischal4d278212016-06-06 17:54:34 +05301649 ret = reset_control_assert(mdwc->core_reset);
Mayank Rana511f3b22016-08-02 12:00:11 -07001650 if (ret)
Amit Nischal4d278212016-06-06 17:54:34 +05301651 dev_err(mdwc->dev, "dwc3 core_reset assert failed\n");
Mayank Rana511f3b22016-08-02 12:00:11 -07001652 } else {
1653 dev_dbg(mdwc->dev, "block_reset DEASSERT\n");
Amit Nischal4d278212016-06-06 17:54:34 +05301654 ret = reset_control_deassert(mdwc->core_reset);
1655 if (ret)
1656 dev_err(mdwc->dev, "dwc3 core_reset deassert failed\n");
Mayank Rana511f3b22016-08-02 12:00:11 -07001657 ndelay(200);
1658 clk_prepare_enable(mdwc->iface_clk);
1659 clk_prepare_enable(mdwc->core_clk);
1660 clk_prepare_enable(mdwc->sleep_clk);
1661 clk_prepare_enable(mdwc->utmi_clk);
Mayank Ranad339abe2017-05-31 09:19:49 -07001662 enable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
Mayank Rana511f3b22016-08-02 12:00:11 -07001663 }
1664
1665 return ret;
1666}
1667
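/*
 * Program GUCTL.REFCLKPER and, on cores at or above revision 2.50a, the
 * GFLADJ register according to the UTMI clock rate (19.2 MHz or 24 MHz),
 * using the values from the Synopsys databook table cited below.
 */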
1668static void dwc3_msm_update_ref_clk(struct dwc3_msm *mdwc)
1669{
1670 u32 guctl, gfladj = 0;
1671
1672 guctl = dwc3_msm_read_reg(mdwc->base, DWC3_GUCTL);
1673 guctl &= ~DWC3_GUCTL_REFCLKPER;
1674
1675 /* GFLADJ register is used starting with revision 2.50a */
1676 if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) >= DWC3_REVISION_250A) {
1677 gfladj = dwc3_msm_read_reg(mdwc->base, DWC3_GFLADJ);
1678 gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
1679 gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZ_DECR;
1680 gfladj &= ~DWC3_GFLADJ_REFCLK_LPM_SEL;
1681 gfladj &= ~DWC3_GFLADJ_REFCLK_FLADJ;
1682 }
1683
1684 /* Refer to SNPS Databook Table 6-55 for calculations used */
1685 switch (mdwc->utmi_clk_rate) {
1686 case 19200000:
1687 guctl |= 52 << __ffs(DWC3_GUCTL_REFCLKPER);
1688 gfladj |= 12 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
1689 gfladj |= DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
1690 gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
1691 gfladj |= 200 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
1692 break;
1693 case 24000000:
1694 guctl |= 41 << __ffs(DWC3_GUCTL_REFCLKPER);
1695 gfladj |= 10 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
1696 gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
1697 gfladj |= 2032 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
1698 break;
1699 default:
1700 dev_warn(mdwc->dev, "Unsupported utmi_clk_rate: %u\n",
1701 mdwc->utmi_clk_rate);
1702 break;
1703 }
1704
1705 dwc3_msm_write_reg(mdwc->base, DWC3_GUCTL, guctl);
1706 if (gfladj)
1707 dwc3_msm_write_reg(mdwc->base, DWC3_GFLADJ, gfladj);
1708}
1709
1710/* Initialize QSCRATCH registers for HSPHY and SSPHY operation */
1711static void dwc3_msm_qscratch_reg_init(struct dwc3_msm *mdwc)
1712{
1713 if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) < DWC3_REVISION_250A)
1714 /* On older cores set XHCI_REV bit to specify revision 1.0 */
1715 dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
1716 BIT(2), 1);
1717
1718 /*
1719 * Enable master clock for RAMs to allow BAM to access RAMs when
1720 * RAM clock gating is enabled via DWC3's GCTL. Otherwise issues
1721 * are seen where RAM clocks get turned OFF in SS mode
1722 */
1723 dwc3_msm_write_reg(mdwc->base, CGCTL_REG,
1724 dwc3_msm_read_reg(mdwc->base, CGCTL_REG) | 0x18);
1725
1726}
1727
Jack Pham4b8b4ae2016-08-09 11:36:34 -07001728static void dwc3_msm_vbus_draw_work(struct work_struct *w)
1729{
1730 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
1731 vbus_draw_work);
1732 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1733
1734 dwc3_msm_gadget_vbus_draw(mdwc, dwc->vbus_draw);
1735}
1736
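/*
 * Callback invoked by the dwc3 core to report controller events: error,
 * reset, post-reset, connection done, OTG/bus-suspend notification, vbus
 * draw, session restart and GSI event buffer alloc/setup/cleanup/free.
 */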
Mayank Rana511f3b22016-08-02 12:00:11 -07001737static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event)
1738{
1739 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
Mayank Ranaf4918d32016-12-15 13:35:55 -08001740 struct dwc3_event_buffer *evt;
Mayank Rana511f3b22016-08-02 12:00:11 -07001741 u32 reg;
Mayank Ranaf4918d32016-12-15 13:35:55 -08001742 int i;
Mayank Rana511f3b22016-08-02 12:00:11 -07001743
1744 switch (event) {
1745 case DWC3_CONTROLLER_ERROR_EVENT:
1746 dev_info(mdwc->dev,
1747 "DWC3_CONTROLLER_ERROR_EVENT received, irq cnt %lu\n",
1748 dwc->irq_cnt);
1749
1750 dwc3_gadget_disable_irq(dwc);
1751
1752 /* prevent core from generating interrupts until recovery */
1753 reg = dwc3_msm_read_reg(mdwc->base, DWC3_GCTL);
1754 reg |= DWC3_GCTL_CORESOFTRESET;
1755 dwc3_msm_write_reg(mdwc->base, DWC3_GCTL, reg);
1756
1757 /* restart USB which performs full reset and reconnect */
1758 schedule_work(&mdwc->restart_usb_work);
1759 break;
1760 case DWC3_CONTROLLER_RESET_EVENT:
1761 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESET_EVENT received\n");
1762 /* HS & SSPHYs get reset as part of core soft reset */
1763 dwc3_msm_qscratch_reg_init(mdwc);
1764 break;
1765 case DWC3_CONTROLLER_POST_RESET_EVENT:
1766 dev_dbg(mdwc->dev,
1767 "DWC3_CONTROLLER_POST_RESET_EVENT received\n");
1768
1769 /*
1770 * The sequence below is used when the controller operates without
Vijayavardhan Vennapusa64d7a522016-10-21 15:02:09 +05301771 * an ssphy and only USB high/full speed is supported.
Mayank Rana511f3b22016-08-02 12:00:11 -07001772 */
Vijayavardhan Vennapusa64d7a522016-10-21 15:02:09 +05301773 if (dwc->maximum_speed == USB_SPEED_HIGH ||
1774 dwc->maximum_speed == USB_SPEED_FULL) {
Mayank Rana511f3b22016-08-02 12:00:11 -07001775 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1776 dwc3_msm_read_reg(mdwc->base,
1777 QSCRATCH_GENERAL_CFG)
1778 | PIPE_UTMI_CLK_DIS);
1779
1780 usleep_range(2, 5);
1781
1782
1783 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1784 dwc3_msm_read_reg(mdwc->base,
1785 QSCRATCH_GENERAL_CFG)
1786 | PIPE_UTMI_CLK_SEL
1787 | PIPE3_PHYSTATUS_SW);
1788
1789 usleep_range(2, 5);
1790
1791 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1792 dwc3_msm_read_reg(mdwc->base,
1793 QSCRATCH_GENERAL_CFG)
1794 & ~PIPE_UTMI_CLK_DIS);
1795 }
1796
1797 dwc3_msm_update_ref_clk(mdwc);
1798 dwc->tx_fifo_size = mdwc->tx_fifo_size;
1799 break;
1800 case DWC3_CONTROLLER_CONNDONE_EVENT:
1801 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_CONNDONE_EVENT received\n");
1802 /*
1803 * Add power event if the dbm indicates coming out of L1 by
1804 * interrupt
1805 */
1806 if (mdwc->dbm && dbm_l1_lpm_interrupt(mdwc->dbm))
1807 dwc3_msm_write_reg_field(mdwc->base,
1808 PWR_EVNT_IRQ_MASK_REG,
1809 PWR_EVNT_LPM_OUT_L1_MASK, 1);
1810
1811 atomic_set(&dwc->in_lpm, 0);
1812 break;
1813 case DWC3_CONTROLLER_NOTIFY_OTG_EVENT:
1814 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_NOTIFY_OTG_EVENT received\n");
1815 if (dwc->enable_bus_suspend) {
1816 mdwc->suspend = dwc->b_suspend;
1817 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
1818 }
1819 break;
1820 case DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT:
1821 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT received\n");
Jack Pham4b8b4ae2016-08-09 11:36:34 -07001822 schedule_work(&mdwc->vbus_draw_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07001823 break;
1824 case DWC3_CONTROLLER_RESTART_USB_SESSION:
1825 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESTART_USB_SESSION received\n");
Hemant Kumar43874172016-08-25 16:17:48 -07001826 schedule_work(&mdwc->restart_usb_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07001827 break;
Mayank Ranaf4918d32016-12-15 13:35:55 -08001828 case DWC3_GSI_EVT_BUF_ALLOC:
1829 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_ALLOC\n");
1830
1831 if (!mdwc->num_gsi_event_buffers)
1832 break;
1833
1834 mdwc->gsi_ev_buff = devm_kzalloc(dwc->dev,
1835 sizeof(*dwc->ev_buf) * mdwc->num_gsi_event_buffers,
1836 GFP_KERNEL);
1837 if (!mdwc->gsi_ev_buff) {
1838 dev_err(dwc->dev, "can't allocate gsi_ev_buff\n");
1839 break;
1840 }
1841
1842 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1843
1844 evt = devm_kzalloc(dwc->dev, sizeof(*evt), GFP_KERNEL);
1845 if (!evt)
1846 break;
1847 evt->dwc = dwc;
1848 evt->length = DWC3_EVENT_BUFFERS_SIZE;
1849 evt->buf = dma_alloc_coherent(dwc->dev,
1850 DWC3_EVENT_BUFFERS_SIZE,
1851 &evt->dma, GFP_KERNEL);
1852 if (!evt->buf) {
1853 dev_err(dwc->dev,
1854 "can't allocate gsi_evt_buf(%d)\n", i);
1855 break;
1856 }
1857 mdwc->gsi_ev_buff[i] = evt;
1858 }
1859 break;
1860 case DWC3_GSI_EVT_BUF_SETUP:
1861 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_SETUP\n");
1862 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1863 evt = mdwc->gsi_ev_buff[i];
1864 dev_dbg(mdwc->dev, "Evt buf %p dma %08llx length %d\n",
1865 evt->buf, (unsigned long long) evt->dma,
1866 evt->length);
1867 memset(evt->buf, 0, evt->length);
1868 evt->lpos = 0;
1869 /*
1870 * Primary event buffer is programmed with registers
1871 * DWC3_GEVNT*(0). Hence use DWC3_GEVNT*(i+1) to
1872 * program USB GSI related event buffer with DWC3
1873 * controller.
1874 */
1875 dwc3_writel(dwc->regs, DWC3_GEVNTADRLO((i+1)),
1876 lower_32_bits(evt->dma));
1877 dwc3_writel(dwc->regs, DWC3_GEVNTADRHI((i+1)),
1878 DWC3_GEVNTADRHI_EVNTADRHI_GSI_EN(
1879 DWC3_GEVENT_TYPE_GSI) |
1880 DWC3_GEVNTADRHI_EVNTADRHI_GSI_IDX((i+1)));
1881 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ((i+1)),
1882 DWC3_GEVNTCOUNT_EVNTINTRPTMASK |
1883 ((evt->length) & 0xffff));
1884 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT((i+1)), 0);
1885 }
1886 break;
1887 case DWC3_GSI_EVT_BUF_CLEANUP:
1888 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_CLEANUP\n");
1889 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1890 evt = mdwc->gsi_ev_buff[i];
1891 evt->lpos = 0;
1892 /*
1893 * Primary event buffer is programmed with registers
1894 * DWC3_GEVNT*(0). Hence use DWC3_GEVNT*(i+1) to
1895 * program USB GSI related event buffer with DWC3
1896 * controller.
1897 */
1898 dwc3_writel(dwc->regs, DWC3_GEVNTADRLO((i+1)), 0);
1899 dwc3_writel(dwc->regs, DWC3_GEVNTADRHI((i+1)), 0);
1900 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ((i+1)),
1901 DWC3_GEVNTSIZ_INTMASK |
1902 DWC3_GEVNTSIZ_SIZE((i+1)));
1903 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT((i+1)), 0);
1904 }
1905 break;
1906 case DWC3_GSI_EVT_BUF_FREE:
1907 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_FREE\n");
1908 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1909 evt = mdwc->gsi_ev_buff[i];
1910 if (evt)
1911 dma_free_coherent(dwc->dev, evt->length,
1912 evt->buf, evt->dma);
1913 }
1914 break;
Mayank Rana511f3b22016-08-02 12:00:11 -07001915 default:
1916 dev_dbg(mdwc->dev, "unknown dwc3 event\n");
1917 break;
1918 }
1919}
1920
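/*
 * Optionally assert/deassert the core block reset, then soft-reset and
 * re-enable the DBM if one is present.
 */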
1921static void dwc3_msm_block_reset(struct dwc3_msm *mdwc, bool core_reset)
1922{
1923 int ret = 0;
1924
1925 if (core_reset) {
1926 ret = dwc3_msm_link_clk_reset(mdwc, 1);
1927 if (ret)
1928 return;
1929
1930 usleep_range(1000, 1200);
1931 ret = dwc3_msm_link_clk_reset(mdwc, 0);
1932 if (ret)
1933 return;
1934
1935 usleep_range(10000, 12000);
1936 }
1937
1938 if (mdwc->dbm) {
1939 /* Reset the DBM */
1940 dbm_soft_reset(mdwc->dbm, 1);
1941 usleep_range(1000, 1200);
1942 dbm_soft_reset(mdwc->dbm, 0);
1943
1944 /* Enable DBM */
1945 dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
1946 DBM_EN_MASK, 0x1);
1947 dbm_enable(mdwc->dbm);
1948 }
1949}
1950
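/*
 * Re-initialize the controller after a power collapse: configure the
 * AHB2PHY bridge wait states, run one-time core pre-init on first use,
 * then redo dwc3 core init and event buffer setup.
 */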
1951static void dwc3_msm_power_collapse_por(struct dwc3_msm *mdwc)
1952{
1953 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1954 u32 val;
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05301955 int ret;
Mayank Rana511f3b22016-08-02 12:00:11 -07001956
1957 /* Configure AHB2PHY for one wait state read/write */
1958 if (mdwc->ahb2phy_base) {
1959 clk_prepare_enable(mdwc->cfg_ahb_clk);
1960 val = readl_relaxed(mdwc->ahb2phy_base +
1961 PERIPH_SS_AHB2PHY_TOP_CFG);
1962 if (val != ONE_READ_WRITE_WAIT) {
1963 writel_relaxed(ONE_READ_WRITE_WAIT,
1964 mdwc->ahb2phy_base + PERIPH_SS_AHB2PHY_TOP_CFG);
1965 /* complete above write before configuring USB PHY. */
1966 mb();
1967 }
1968 clk_disable_unprepare(mdwc->cfg_ahb_clk);
1969 }
1970
1971 if (!mdwc->init) {
Mayank Rana08e41922017-03-02 15:25:48 -08001972 dbg_event(0xFF, "dwc3 init",
1973 atomic_read(&mdwc->dev->power.usage_count));
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05301974 ret = dwc3_core_pre_init(dwc);
1975 if (ret) {
1976 dev_err(mdwc->dev, "dwc3_core_pre_init failed\n");
1977 return;
1978 }
Mayank Rana511f3b22016-08-02 12:00:11 -07001979 mdwc->init = true;
1980 }
1981
1982 dwc3_core_init(dwc);
1983 /* Re-configure event buffers */
1984 dwc3_event_buffers_setup(dwc);
1985}
1986
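/*
 * Prepare the controller for low power mode: require the link to be in P3
 * when a superspeed connection is active, enable HSPHY auto-suspend and
 * wait (up to ~5 ms) for the HS PHY to enter L2.
 */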
1987static int dwc3_msm_prepare_suspend(struct dwc3_msm *mdwc)
1988{
1989 unsigned long timeout;
1990 u32 reg = 0;
1991
1992 if ((mdwc->in_host_mode || mdwc->vbus_active)
Vijayavardhan Vennapusa8cf91a62016-09-01 12:05:50 +05301993 && dwc3_msm_is_superspeed(mdwc) && !mdwc->in_restart) {
Mayank Rana511f3b22016-08-02 12:00:11 -07001994 if (!atomic_read(&mdwc->in_p3)) {
1995 dev_err(mdwc->dev, "Not in P3,aborting LPM sequence\n");
1996 return -EBUSY;
1997 }
1998 }
1999
2000 /* Clear previous L2 events */
2001 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
2002 PWR_EVNT_LPM_IN_L2_MASK | PWR_EVNT_LPM_OUT_L2_MASK);
2003
2004 /* Prepare HSPHY for suspend */
2005 reg = dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0));
2006 dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
2007 reg | DWC3_GUSB2PHYCFG_ENBLSLPM | DWC3_GUSB2PHYCFG_SUSPHY);
2008
2009 /* Wait for PHY to go into L2 */
2010 timeout = jiffies + msecs_to_jiffies(5);
2011 while (!time_after(jiffies, timeout)) {
2012 reg = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
2013 if (reg & PWR_EVNT_LPM_IN_L2_MASK)
2014 break;
2015 }
2016 if (!(reg & PWR_EVNT_LPM_IN_L2_MASK))
2017 dev_err(mdwc->dev, "could not transition HS PHY to L2\n");
2018
2019 /* Clear L2 event bit */
2020 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
2021 PWR_EVNT_LPM_IN_L2_MASK);
2022
2023 return 0;
2024}
2025
Mayank Rana511f3b22016-08-02 12:00:11 -07002026static void dwc3_set_phy_speed_flags(struct dwc3_msm *mdwc)
2027{
2028 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2029 int i, num_ports;
2030 u32 reg;
2031
2032 mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
2033 if (mdwc->in_host_mode) {
2034 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
2035 num_ports = HCS_MAX_PORTS(reg);
2036 for (i = 0; i < num_ports; i++) {
2037 reg = dwc3_msm_read_reg(mdwc->base,
2038 USB3_PORTSC + i*0x10);
2039 if (reg & PORT_PE) {
2040 if (DEV_HIGHSPEED(reg) || DEV_FULLSPEED(reg))
2041 mdwc->hs_phy->flags |= PHY_HSFS_MODE;
2042 else if (DEV_LOWSPEED(reg))
2043 mdwc->hs_phy->flags |= PHY_LS_MODE;
2044 }
2045 }
2046 } else {
2047 if (dwc->gadget.speed == USB_SPEED_HIGH ||
2048 dwc->gadget.speed == USB_SPEED_FULL)
2049 mdwc->hs_phy->flags |= PHY_HSFS_MODE;
2050 else if (dwc->gadget.speed == USB_SPEED_LOW)
2051 mdwc->hs_phy->flags |= PHY_LS_MODE;
2052 }
2053}
2054
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05302055static void msm_dwc3_perf_vote_update(struct dwc3_msm *mdwc,
2056 bool perf_mode);
Mayank Rana511f3b22016-08-02 12:00:11 -07002057
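/*
 * Arm or disarm one PDC-routed USB wakeup interrupt with the requested
 * trigger polarity; uirq->enable tracks the current state so each IRQ is
 * only enabled or disabled once.
 */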
Mayank Ranad339abe2017-05-31 09:19:49 -07002058static void configure_usb_wakeup_interrupt(struct dwc3_msm *mdwc,
2059 struct usb_irq *uirq, unsigned int polarity, bool enable)
2060{
2061 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2062
2063 if (uirq && enable && !uirq->enable) {
2064 dbg_event(0xFF, "PDC_IRQ_EN", uirq->irq);
2065 dbg_event(0xFF, "PDC_IRQ_POL", polarity);
2066 /* clear any pending interrupt */
2067 irq_set_irqchip_state(uirq->irq, IRQCHIP_STATE_PENDING, 0);
2068 irq_set_irq_type(uirq->irq, polarity);
2069 enable_irq_wake(uirq->irq);
2070 enable_irq(uirq->irq);
2071 uirq->enable = true;
2072 }
2073
2074 if (uirq && !enable && uirq->enable) {
2075 dbg_event(0xFF, "PDC_IRQ_DIS", uirq->irq);
2076 disable_irq_wake(uirq->irq);
2077 disable_irq_nosync(uirq->irq);
2078 uirq->enable = false;
2079 }
2080}
2081
2082static void enable_usb_pdc_interrupt(struct dwc3_msm *mdwc, bool enable)
2083{
2084 if (!enable)
2085 goto disable_usb_irq;
2086
2087 if (mdwc->hs_phy->flags & PHY_LS_MODE) {
2088 configure_usb_wakeup_interrupt(mdwc,
2089 &mdwc->wakeup_irq[DM_HS_PHY_IRQ],
2090 IRQ_TYPE_EDGE_FALLING, enable);
2091 } else if (mdwc->hs_phy->flags & PHY_HSFS_MODE) {
2092 configure_usb_wakeup_interrupt(mdwc,
2093 &mdwc->wakeup_irq[DP_HS_PHY_IRQ],
2094 IRQ_TYPE_EDGE_FALLING, enable);
2095 } else {
2096 configure_usb_wakeup_interrupt(mdwc,
2097 &mdwc->wakeup_irq[DP_HS_PHY_IRQ],
2098 IRQ_TYPE_EDGE_RISING, true);
2099 configure_usb_wakeup_interrupt(mdwc,
2100 &mdwc->wakeup_irq[DM_HS_PHY_IRQ],
2101 IRQ_TYPE_EDGE_RISING, true);
2102 }
2103
2104 configure_usb_wakeup_interrupt(mdwc,
2105 &mdwc->wakeup_irq[SS_PHY_IRQ],
2106 IRQF_TRIGGER_HIGH | IRQ_TYPE_LEVEL_HIGH, enable);
2107 return;
2108
2109disable_usb_irq:
2110 configure_usb_wakeup_interrupt(mdwc,
2111 &mdwc->wakeup_irq[DP_HS_PHY_IRQ], 0, enable);
2112 configure_usb_wakeup_interrupt(mdwc,
2113 &mdwc->wakeup_irq[DM_HS_PHY_IRQ], 0, enable);
2114 configure_usb_wakeup_interrupt(mdwc,
2115 &mdwc->wakeup_irq[SS_PHY_IRQ], 0, enable);
2116}
2117
2118static void configure_nonpdc_usb_interrupt(struct dwc3_msm *mdwc,
2119 struct usb_irq *uirq, bool enable)
2120{
2121 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2122
2123 if (uirq && enable && !uirq->enable) {
2124 dbg_event(0xFF, "IRQ_EN", uirq->irq);
2125 enable_irq_wake(uirq->irq);
2126 enable_irq(uirq->irq);
2127 uirq->enable = true;
2128 }
2129
2130 if (uirq && !enable && uirq->enable) {
2131 dbg_event(0xFF, "IRQ_DIS", uirq->irq);
2132 disable_irq_wake(uirq->irq);
2133 disable_irq_nosync(uirq->irq);
2134 uirq->enable = false;
2135 }
2136}
2137
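/*
 * Enter low power mode: abort if events are pending or the gadget state
 * does not allow it, suspend the HS/SS PHYs, gate the clocks, optionally
 * power-collapse the controller, drop the bus vote and arm the wakeup
 * interrupts.
 */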
Mayank Rana511f3b22016-08-02 12:00:11 -07002138static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
2139{
Mayank Rana83ad5822016-08-09 14:17:22 -07002140 int ret;
Mayank Rana511f3b22016-08-02 12:00:11 -07002141 bool can_suspend_ssphy;
2142 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana83ad5822016-08-09 14:17:22 -07002143 struct dwc3_event_buffer *evt;
Mayank Ranad339abe2017-05-31 09:19:49 -07002144 struct usb_irq *uirq;
Mayank Rana511f3b22016-08-02 12:00:11 -07002145
2146 if (atomic_read(&dwc->in_lpm)) {
2147 dev_dbg(mdwc->dev, "%s: Already suspended\n", __func__);
2148 return 0;
2149 }
2150
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05302151 cancel_delayed_work_sync(&mdwc->perf_vote_work);
2152 msm_dwc3_perf_vote_update(mdwc, false);
2153
Mayank Rana511f3b22016-08-02 12:00:11 -07002154 if (!mdwc->in_host_mode) {
Mayank Rana83ad5822016-08-09 14:17:22 -07002155 evt = dwc->ev_buf;
2156 if ((evt->flags & DWC3_EVENT_PENDING)) {
2157 dev_dbg(mdwc->dev,
Mayank Rana511f3b22016-08-02 12:00:11 -07002158 "%s: %d device events pending, abort suspend\n",
2159 __func__, evt->count / 4);
Mayank Rana83ad5822016-08-09 14:17:22 -07002160 return -EBUSY;
Mayank Rana511f3b22016-08-02 12:00:11 -07002161 }
2162 }
2163
2164 if (!mdwc->vbus_active && dwc->is_drd &&
2165 mdwc->otg_state == OTG_STATE_B_PERIPHERAL) {
2166 /*
2167 * In some cases, pm_runtime_suspend may be called by usb_bam while
2168 * an LPM flag is still pending. However, if this happens after the
2169 * cable was disconnected but before the OTG state has changed to
2170 * IDLE, the OTG state machine is still running and we would race
2171 * against it. So cancel LPM for now; the OTG state machine will
2172 * enter LPM later, after completing the transition to the IDLE
2173 * state.
2174 */
2175 dev_dbg(mdwc->dev,
2176 "%s: cable disconnected while not in idle otg state\n",
2177 __func__);
2178 return -EBUSY;
2179 }
2180
2181 /*
2182 * For the device bus suspend case, if the gadget is not in the
2183 * CONFIGURED state, abort the LPM sequence here rather than
2184 * failing later on the controller L2 state check.
2185 */
2186 if ((dwc->is_drd && mdwc->otg_state == OTG_STATE_B_SUSPEND) &&
2187 (dwc->gadget.state != USB_STATE_CONFIGURED)) {
2188 pr_err("%s(): Trying to go in LPM with state:%d\n",
2189 __func__, dwc->gadget.state);
2190 pr_err("%s(): LPM is not performed.\n", __func__);
2191 return -EBUSY;
2192 }
2193
2194 ret = dwc3_msm_prepare_suspend(mdwc);
2195 if (ret)
2196 return ret;
2197
2198 /* SS PHY can be suspended only if host is not running at superspeed */
2199 can_suspend_ssphy = !(mdwc->in_host_mode &&
2200 dwc3_msm_is_host_superspeed(mdwc));
2201
2202 /* Disable core irq */
2203 if (dwc->irq)
2204 disable_irq(dwc->irq);
2205
Mayank Ranaf616a7f2017-03-20 16:10:39 -07002206 if (work_busy(&dwc->bh_work))
2207 dbg_event(0xFF, "pend evt", 0);
2208
Mayank Rana511f3b22016-08-02 12:00:11 -07002209 /* disable power event irq; hs and ss phy irqs are used as wakeup sources */
Mayank Ranad339abe2017-05-31 09:19:49 -07002210 disable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
Mayank Rana511f3b22016-08-02 12:00:11 -07002211
2212 dwc3_set_phy_speed_flags(mdwc);
2213 /* Suspend HS PHY */
2214 usb_phy_set_suspend(mdwc->hs_phy, 1);
2215
2216 /* Suspend SS PHY */
Hemant Kumarde1df692016-04-26 19:36:48 -07002217 if (dwc->maximum_speed == USB_SPEED_SUPER && can_suspend_ssphy) {
Mayank Rana511f3b22016-08-02 12:00:11 -07002218 /* indicate phy about SS mode */
2219 if (dwc3_msm_is_superspeed(mdwc))
2220 mdwc->ss_phy->flags |= DEVICE_IN_SS_MODE;
2221 usb_phy_set_suspend(mdwc->ss_phy, 1);
2222 mdwc->lpm_flags |= MDWC3_SS_PHY_SUSPEND;
2223 }
2224
2225 /* make sure above writes are completed before turning off clocks */
2226 wmb();
2227
2228 /* Disable clocks */
2229 if (mdwc->bus_aggr_clk)
2230 clk_disable_unprepare(mdwc->bus_aggr_clk);
2231 clk_disable_unprepare(mdwc->utmi_clk);
2232
Hemant Kumar633dc332016-08-10 13:41:05 -07002233 /* Memory core: OFF, Memory periphery: OFF */
2234 if (!mdwc->in_host_mode && !mdwc->vbus_active) {
2235 clk_set_flags(mdwc->core_clk, CLKFLAG_NORETAIN_MEM);
2236 clk_set_flags(mdwc->core_clk, CLKFLAG_NORETAIN_PERIPH);
2237 }
2238
Mayank Rana511f3b22016-08-02 12:00:11 -07002239 clk_set_rate(mdwc->core_clk, 19200000);
2240 clk_disable_unprepare(mdwc->core_clk);
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05302241 if (mdwc->noc_aggr_clk)
2242 clk_disable_unprepare(mdwc->noc_aggr_clk);
Mayank Rana511f3b22016-08-02 12:00:11 -07002243 /*
2244 * Disable iface_clk only after core_clk as core_clk has FSM
2245 * dependency on iface_clk. Hence iface_clk should be turned off
2246 * after core_clk is turned off.
2247 */
2248 clk_disable_unprepare(mdwc->iface_clk);
2249 /* USB PHY no longer requires TCXO */
2250 clk_disable_unprepare(mdwc->xo_clk);
2251
2252 /* Perform controller power collapse */
Azhar Shaikh69f4c052016-02-11 11:00:58 -08002253 if (!mdwc->in_host_mode && (!mdwc->vbus_active || mdwc->in_restart)) {
Mayank Rana511f3b22016-08-02 12:00:11 -07002254 mdwc->lpm_flags |= MDWC3_POWER_COLLAPSE;
2255 dev_dbg(mdwc->dev, "%s: power collapse\n", __func__);
2256 dwc3_msm_config_gdsc(mdwc, 0);
2257 clk_disable_unprepare(mdwc->sleep_clk);
Jack Phambbe27962017-03-23 18:42:26 -07002258
Jack Pham9faa51df2017-04-03 18:13:40 -07002259 if (mdwc->iommu_map) {
Jack Phambbe27962017-03-23 18:42:26 -07002260 arm_iommu_detach_device(mdwc->dev);
Jack Pham9faa51df2017-04-03 18:13:40 -07002261 dev_dbg(mdwc->dev, "IOMMU detached\n");
2262 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002263 }
2264
2265 /* Remove bus voting */
2266 if (mdwc->bus_perf_client) {
Mayank Ranaca9f3182017-04-13 17:44:14 -07002267 dbg_event(0xFF, "bus_devote_start", 0);
2268 ret = msm_bus_scale_client_update_request(
2269 mdwc->bus_perf_client, 0);
2270 dbg_event(0xFF, "bus_devote_finish", 0);
2271 if (ret)
2272 dev_err(mdwc->dev, "bus bw unvoting failed %d\n", ret);
Mayank Rana511f3b22016-08-02 12:00:11 -07002273 }
2274
2275 /*
2276 * Release the wakeup source with a timeout to defer system suspend,
2277 * to handle the case where SUSPEND and DISCONNECT events are both
2278 * received on a USB cable disconnect.
2279 */
2280 if (mdwc->lpm_to_suspend_delay) {
2281 dev_dbg(mdwc->dev, "defer suspend with %d(msecs)\n",
2282 mdwc->lpm_to_suspend_delay);
2283 pm_wakeup_event(mdwc->dev, mdwc->lpm_to_suspend_delay);
2284 } else {
2285 pm_relax(mdwc->dev);
2286 }
2287
2288 atomic_set(&dwc->in_lpm, 1);
2289
2290 /*
2291 * With DCP or during cable disconnect, we don't require wakeup
2292 * using HS_PHY_IRQ or SS_PHY_IRQ. Hence enable wakeup only in
2293 * case of host bus suspend and device bus suspend.
2294 */
2295 if (mdwc->vbus_active || mdwc->in_host_mode) {
Mayank Ranad339abe2017-05-31 09:19:49 -07002296 if (mdwc->use_pdc_interrupts) {
2297 enable_usb_pdc_interrupt(mdwc, true);
2298 } else {
2299 uirq = &mdwc->wakeup_irq[HS_PHY_IRQ];
2300 configure_nonpdc_usb_interrupt(mdwc, uirq, true);
2301 uirq = &mdwc->wakeup_irq[SS_PHY_IRQ];
2302 configure_nonpdc_usb_interrupt(mdwc, uirq, true);
Mayank Rana511f3b22016-08-02 12:00:11 -07002303 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002304 mdwc->lpm_flags |= MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
2305 }
2306
2307 dev_info(mdwc->dev, "DWC3 in low power mode\n");
2308 return 0;
2309}
2310
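/*
 * Exit low power mode: restore the bus vote and clocks, bring the PHYs out
 * of suspend, recover from power collapse if it was done, re-enable the
 * core and power-event IRQs and disarm the wakeup interrupts.
 */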
2311static int dwc3_msm_resume(struct dwc3_msm *mdwc)
2312{
2313 int ret;
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08002314 long core_clk_rate;
Mayank Rana511f3b22016-08-02 12:00:11 -07002315 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Ranad339abe2017-05-31 09:19:49 -07002316 struct usb_irq *uirq;
Mayank Rana511f3b22016-08-02 12:00:11 -07002317
2318 dev_dbg(mdwc->dev, "%s: exiting lpm\n", __func__);
2319
2320 if (!atomic_read(&dwc->in_lpm)) {
2321 dev_dbg(mdwc->dev, "%s: Already resumed\n", __func__);
2322 return 0;
2323 }
2324
2325 pm_stay_awake(mdwc->dev);
2326
2327 /* Enable bus voting */
2328 if (mdwc->bus_perf_client) {
Mayank Ranaca9f3182017-04-13 17:44:14 -07002329 dbg_event(0xFF, "bus_vote_start", 1);
2330 ret = msm_bus_scale_client_update_request(
2331 mdwc->bus_perf_client, 1);
2332 dbg_event(0xFF, "bus_vote_finish", 1);
2333 if (ret)
2334 dev_err(mdwc->dev, "bus bw voting failed %d\n", ret);
Mayank Rana511f3b22016-08-02 12:00:11 -07002335 }
2336
2337 /* Vote for TCXO while waking up USB HSPHY */
2338 ret = clk_prepare_enable(mdwc->xo_clk);
2339 if (ret)
2340 dev_err(mdwc->dev, "%s failed to vote TCXO buffer%d\n",
2341 __func__, ret);
2342
2343 /* Restore controller power collapse */
2344 if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
2345 dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
2346 dwc3_msm_config_gdsc(mdwc, 1);
Amit Nischal4d278212016-06-06 17:54:34 +05302347 ret = reset_control_assert(mdwc->core_reset);
2348 if (ret)
2349 dev_err(mdwc->dev, "%s:core_reset assert failed\n",
2350 __func__);
Mayank Rana511f3b22016-08-02 12:00:11 -07002351 /* HW requires a short delay for reset to take place properly */
2352 usleep_range(1000, 1200);
Amit Nischal4d278212016-06-06 17:54:34 +05302353 ret = reset_control_deassert(mdwc->core_reset);
2354 if (ret)
2355 dev_err(mdwc->dev, "%s:core_reset deassert failed\n",
2356 __func__);
Mayank Rana511f3b22016-08-02 12:00:11 -07002357 clk_prepare_enable(mdwc->sleep_clk);
2358 }
2359
2360 /*
2361 * Enable clocks
2362 * Turn ON iface_clk before core_clk due to FSM dependency.
2363 */
2364 clk_prepare_enable(mdwc->iface_clk);
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05302365 if (mdwc->noc_aggr_clk)
2366 clk_prepare_enable(mdwc->noc_aggr_clk);
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08002367
2368 core_clk_rate = mdwc->core_clk_rate;
2369 if (mdwc->in_host_mode && mdwc->max_rh_port_speed == USB_SPEED_HIGH) {
2370 core_clk_rate = mdwc->core_clk_rate_hs;
2371 dev_dbg(mdwc->dev, "%s: set hs core clk rate %ld\n", __func__,
2372 core_clk_rate);
2373 }
2374
2375 clk_set_rate(mdwc->core_clk, core_clk_rate);
Mayank Rana511f3b22016-08-02 12:00:11 -07002376 clk_prepare_enable(mdwc->core_clk);
Hemant Kumar5fa38932016-10-27 11:58:37 -07002377
2378 /* set Memory core: ON, Memory periphery: ON */
2379 clk_set_flags(mdwc->core_clk, CLKFLAG_RETAIN_MEM);
2380 clk_set_flags(mdwc->core_clk, CLKFLAG_RETAIN_PERIPH);
2381
Mayank Rana511f3b22016-08-02 12:00:11 -07002382 clk_prepare_enable(mdwc->utmi_clk);
2383 if (mdwc->bus_aggr_clk)
2384 clk_prepare_enable(mdwc->bus_aggr_clk);
2385
2386 /* Resume SS PHY */
Hemant Kumarde1df692016-04-26 19:36:48 -07002387 if (dwc->maximum_speed == USB_SPEED_SUPER &&
2388 mdwc->lpm_flags & MDWC3_SS_PHY_SUSPEND) {
Mayank Rana511f3b22016-08-02 12:00:11 -07002389 mdwc->ss_phy->flags &= ~(PHY_LANE_A | PHY_LANE_B);
2390 if (mdwc->typec_orientation == ORIENTATION_CC1)
2391 mdwc->ss_phy->flags |= PHY_LANE_A;
2392 if (mdwc->typec_orientation == ORIENTATION_CC2)
2393 mdwc->ss_phy->flags |= PHY_LANE_B;
2394 usb_phy_set_suspend(mdwc->ss_phy, 0);
2395 mdwc->ss_phy->flags &= ~DEVICE_IN_SS_MODE;
2396 mdwc->lpm_flags &= ~MDWC3_SS_PHY_SUSPEND;
2397 }
2398
2399 mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
2400 /* Resume HS PHY */
2401 usb_phy_set_suspend(mdwc->hs_phy, 0);
2402
2403 /* Recover from controller power collapse */
2404 if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
2405 u32 tmp;
2406
Jack Pham9faa51df2017-04-03 18:13:40 -07002407 if (mdwc->iommu_map) {
2408 ret = arm_iommu_attach_device(mdwc->dev,
2409 mdwc->iommu_map);
2410 if (ret)
2411 dev_err(mdwc->dev, "IOMMU attach failed (%d)\n",
2412 ret);
2413 else
2414 dev_dbg(mdwc->dev, "attached to IOMMU\n");
2415 }
2416
Mayank Rana511f3b22016-08-02 12:00:11 -07002417 dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
2418
2419 dwc3_msm_power_collapse_por(mdwc);
2420
2421 /* Get initial P3 status and enable IN_P3 event */
2422 tmp = dwc3_msm_read_reg_field(mdwc->base,
2423 DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
2424 atomic_set(&mdwc->in_p3, tmp == DWC3_LINK_STATE_U3);
2425 dwc3_msm_write_reg_field(mdwc->base, PWR_EVNT_IRQ_MASK_REG,
2426 PWR_EVNT_POWERDOWN_IN_P3_MASK, 1);
2427
2428 mdwc->lpm_flags &= ~MDWC3_POWER_COLLAPSE;
2429 }
2430
2431 atomic_set(&dwc->in_lpm, 0);
2432
Vijayavardhan Vennapusa6a4c1d92016-12-08 13:06:26 +05302433 /* enable power evt irq for IN P3 detection */
Mayank Ranad339abe2017-05-31 09:19:49 -07002434 enable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
Vijayavardhan Vennapusa6a4c1d92016-12-08 13:06:26 +05302435
Mayank Rana511f3b22016-08-02 12:00:11 -07002436 /* Disable HSPHY auto suspend */
2437 dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
2438 dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0)) &
2439 ~(DWC3_GUSB2PHYCFG_ENBLSLPM |
2440 DWC3_GUSB2PHYCFG_SUSPHY));
2441
2442 /* Disable wakeup capable for HS_PHY IRQ & SS_PHY_IRQ if enabled */
2443 if (mdwc->lpm_flags & MDWC3_ASYNC_IRQ_WAKE_CAPABILITY) {
Mayank Ranad339abe2017-05-31 09:19:49 -07002444 if (mdwc->use_pdc_interrupts) {
2445 enable_usb_pdc_interrupt(mdwc, false);
2446 } else {
2447 uirq = &mdwc->wakeup_irq[HS_PHY_IRQ];
2448 configure_nonpdc_usb_interrupt(mdwc, uirq, false);
2449 uirq = &mdwc->wakeup_irq[SS_PHY_IRQ];
2450 configure_nonpdc_usb_interrupt(mdwc, uirq, false);
Mayank Rana511f3b22016-08-02 12:00:11 -07002451 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002452 mdwc->lpm_flags &= ~MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
2453 }
2454
2455 dev_info(mdwc->dev, "DWC3 exited from low power mode\n");
2456
Mayank Rana511f3b22016-08-02 12:00:11 -07002457 /* Enable core irq */
2458 if (dwc->irq)
2459 enable_irq(dwc->irq);
2460
2461 /*
2462 * Handle other power events that could not have been handled during
2463 * Low Power Mode
2464 */
2465 dwc3_pwr_event_handler(mdwc);
2466
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05302467 if (pm_qos_request_active(&mdwc->pm_qos_req_dma))
2468 schedule_delayed_work(&mdwc->perf_vote_work,
2469 msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
2470
Mayank Rana08e41922017-03-02 15:25:48 -08002471 dbg_event(0xFF, "Ctl Res", atomic_read(&dwc->in_lpm));
Mayank Rana511f3b22016-08-02 12:00:11 -07002472 return 0;
2473}
2474
2475/**
2476 * dwc3_ext_event_notify - callback to handle events from external transceiver
2477 *
2478 * Updates the ID, B_SESS_VLD and B_SUSPEND inputs and schedules sm_work.
2479 */
2480static void dwc3_ext_event_notify(struct dwc3_msm *mdwc)
2481{
2482 /* Flush processing any pending events before handling new ones */
2483 flush_delayed_work(&mdwc->sm_work);
2484
2485 if (mdwc->id_state == DWC3_ID_FLOAT) {
2486 dev_dbg(mdwc->dev, "XCVR: ID set\n");
2487 set_bit(ID, &mdwc->inputs);
2488 } else {
2489 dev_dbg(mdwc->dev, "XCVR: ID clear\n");
2490 clear_bit(ID, &mdwc->inputs);
2491 }
2492
2493 if (mdwc->vbus_active && !mdwc->in_restart) {
2494 dev_dbg(mdwc->dev, "XCVR: BSV set\n");
2495 set_bit(B_SESS_VLD, &mdwc->inputs);
2496 } else {
2497 dev_dbg(mdwc->dev, "XCVR: BSV clear\n");
2498 clear_bit(B_SESS_VLD, &mdwc->inputs);
2499 }
2500
2501 if (mdwc->suspend) {
2502 dev_dbg(mdwc->dev, "XCVR: SUSP set\n");
2503 set_bit(B_SUSPEND, &mdwc->inputs);
2504 } else {
2505 dev_dbg(mdwc->dev, "XCVR: SUSP clear\n");
2506 clear_bit(B_SUSPEND, &mdwc->inputs);
2507 }
2508
2509 schedule_delayed_work(&mdwc->sm_work, 0);
2510}
2511
2512static void dwc3_resume_work(struct work_struct *w)
2513{
2514 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, resume_work);
Mayank Rana08e41922017-03-02 15:25:48 -08002515 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Jack Pham4e9dff72017-04-04 18:05:53 -07002516 union extcon_property_value val;
2517 unsigned int extcon_id;
2518 struct extcon_dev *edev = NULL;
2519 int ret = 0;
Mayank Rana511f3b22016-08-02 12:00:11 -07002520
2521 dev_dbg(mdwc->dev, "%s: dwc3 resume work\n", __func__);
2522
Jack Pham4e9dff72017-04-04 18:05:53 -07002523 if (mdwc->vbus_active) {
2524 edev = mdwc->extcon_vbus;
2525 extcon_id = EXTCON_USB;
2526 } else if (mdwc->id_state == DWC3_ID_GROUND) {
2527 edev = mdwc->extcon_id;
2528 extcon_id = EXTCON_USB_HOST;
2529 }
2530
2531 /* Check speed and Type-C polarity values in order to configure PHY */
2532 if (edev && extcon_get_state(edev, extcon_id)) {
2533 ret = extcon_get_property(edev, extcon_id,
2534 EXTCON_PROP_USB_SS, &val);
2535
2536 /* Use default dwc->maximum_speed if speed isn't reported */
2537 if (!ret)
2538 dwc->maximum_speed = (val.intval == 0) ?
2539 USB_SPEED_HIGH : USB_SPEED_SUPER;
2540
2541 if (dwc->maximum_speed > dwc->max_hw_supp_speed)
2542 dwc->maximum_speed = dwc->max_hw_supp_speed;
2543
Mayank Ranaf70d8212017-06-12 14:02:07 -07002544 if (override_usb_speed &&
2545 is_valid_usb_speed(dwc, override_usb_speed)) {
2546 dwc->maximum_speed = override_usb_speed;
2547 dbg_event(0xFF, "override_speed", override_usb_speed);
2548 }
2549
Jack Pham4e9dff72017-04-04 18:05:53 -07002550 dbg_event(0xFF, "speed", dwc->maximum_speed);
2551
2552 ret = extcon_get_property(edev, extcon_id,
2553 EXTCON_PROP_USB_TYPEC_POLARITY, &val);
2554 if (ret)
2555 mdwc->typec_orientation = ORIENTATION_NONE;
2556 else
2557 mdwc->typec_orientation = val.intval ?
2558 ORIENTATION_CC2 : ORIENTATION_CC1;
2559
2560 dbg_event(0xFF, "cc_state", mdwc->typec_orientation);
2561 }
2562
Mayank Rana511f3b22016-08-02 12:00:11 -07002563 /*
2564 * exit LPM first to meet resume timeline from device side.
2565 * resume_pending flag would prevent calling
2566 * dwc3_msm_resume() in case we are here due to system
2567 * wide resume without usb cable connected. This flag is set
2568 * only in case of power event irq in lpm.
2569 */
2570 if (mdwc->resume_pending) {
2571 dwc3_msm_resume(mdwc);
2572 mdwc->resume_pending = false;
2573 }
2574
Mayank Rana08e41922017-03-02 15:25:48 -08002575 if (atomic_read(&mdwc->pm_suspended)) {
2576 dbg_event(0xFF, "RWrk PMSus", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07002577 /* let pm resume kick in resume work later */
2578 return;
Mayank Rana08e41922017-03-02 15:25:48 -08002579 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002580 dwc3_ext_event_notify(mdwc);
2581}
2582
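/*
 * Handle power events latched in PWR_EVNT_IRQ_STAT_REG: track P3
 * entry/exit, acknowledge L2 exit, and initiate remote wakeup on L1 exit.
 */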
2583static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc)
2584{
2585 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2586 u32 irq_stat, irq_clear = 0;
2587
2588 irq_stat = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
2589 dev_dbg(mdwc->dev, "%s irq_stat=%X\n", __func__, irq_stat);
2590
2591 /* Check for P3 events */
2592 if ((irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) &&
2593 (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK)) {
2594 /* Can't tell if entered or exit P3, so check LINKSTATE */
2595 u32 ls = dwc3_msm_read_reg_field(mdwc->base,
2596 DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
2597 dev_dbg(mdwc->dev, "%s link state = 0x%04x\n", __func__, ls);
2598 atomic_set(&mdwc->in_p3, ls == DWC3_LINK_STATE_U3);
2599
2600 irq_stat &= ~(PWR_EVNT_POWERDOWN_OUT_P3_MASK |
2601 PWR_EVNT_POWERDOWN_IN_P3_MASK);
2602 irq_clear |= (PWR_EVNT_POWERDOWN_OUT_P3_MASK |
2603 PWR_EVNT_POWERDOWN_IN_P3_MASK);
2604 } else if (irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) {
2605 atomic_set(&mdwc->in_p3, 0);
2606 irq_stat &= ~PWR_EVNT_POWERDOWN_OUT_P3_MASK;
2607 irq_clear |= PWR_EVNT_POWERDOWN_OUT_P3_MASK;
2608 } else if (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK) {
2609 atomic_set(&mdwc->in_p3, 1);
2610 irq_stat &= ~PWR_EVNT_POWERDOWN_IN_P3_MASK;
2611 irq_clear |= PWR_EVNT_POWERDOWN_IN_P3_MASK;
2612 }
2613
2614 /* Clear L2 exit */
2615 if (irq_stat & PWR_EVNT_LPM_OUT_L2_MASK) {
2616 irq_stat &= ~PWR_EVNT_LPM_OUT_L2_MASK;
2617 irq_clear |= PWR_EVNT_LPM_OUT_L2_MASK;
2618 }
2619
2620 /* Handle exit from L1 events */
2621 if (irq_stat & PWR_EVNT_LPM_OUT_L1_MASK) {
2622 dev_dbg(mdwc->dev, "%s: handling PWR_EVNT_LPM_OUT_L1_MASK\n",
2623 __func__);
2624 if (usb_gadget_wakeup(&dwc->gadget))
2625 dev_err(mdwc->dev, "%s failed to take dwc out of L1\n",
2626 __func__);
2627 irq_stat &= ~PWR_EVNT_LPM_OUT_L1_MASK;
2628 irq_clear |= PWR_EVNT_LPM_OUT_L1_MASK;
2629 }
2630
2631 /* Unhandled events */
2632 if (irq_stat)
2633 dev_dbg(mdwc->dev, "%s: unexpected PWR_EVNT, irq_stat=%X\n",
2634 __func__, irq_stat);
2635
2636 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG, irq_clear);
2637}
2638
2639static irqreturn_t msm_dwc3_pwr_irq_thread(int irq, void *_mdwc)
2640{
2641 struct dwc3_msm *mdwc = _mdwc;
2642 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2643
2644 dev_dbg(mdwc->dev, "%s\n", __func__);
2645
2646 if (atomic_read(&dwc->in_lpm))
2647 dwc3_resume_work(&mdwc->resume_work);
2648 else
2649 dwc3_pwr_event_handler(mdwc);
2650
Mayank Rana08e41922017-03-02 15:25:48 -08002651 dbg_event(0xFF, "PWR IRQ", atomic_read(&dwc->in_lpm));
Mayank Rana511f3b22016-08-02 12:00:11 -07002652 return IRQ_HANDLED;
2653}
2654
2655static irqreturn_t msm_dwc3_pwr_irq(int irq, void *data)
2656{
2657 struct dwc3_msm *mdwc = data;
2658 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2659
2660 dwc->t_pwr_evt_irq = ktime_get();
2661 dev_dbg(mdwc->dev, "%s received\n", __func__);
2662 /*
2663 * When in Low Power Mode, we can't read PWR_EVNT_IRQ_STAT_REG to
2664 * ascertain which interrupts have been triggered, as the clocks are
2665 * disabled. Resume the controller by waking up the pwr event irq
2666 * thread. After re-enabling
2666 * clocks, dwc3_msm_resume will call dwc3_pwr_event_handler to handle
2667 * all other power events.
2668 */
2669 if (atomic_read(&dwc->in_lpm)) {
2670 /* set this to call dwc3_msm_resume() */
2671 mdwc->resume_pending = true;
2672 return IRQ_WAKE_THREAD;
2673 }
2674
2675 dwc3_pwr_event_handler(mdwc);
2676 return IRQ_HANDLED;
2677}
2678
2679static int dwc3_cpu_notifier_cb(struct notifier_block *nfb,
2680 unsigned long action, void *hcpu)
2681{
2682 uint32_t cpu = (uintptr_t)hcpu;
2683 struct dwc3_msm *mdwc =
2684 container_of(nfb, struct dwc3_msm, dwc3_cpu_notifier);
2685
2686 if (cpu == cpu_to_affin && action == CPU_ONLINE) {
2687 pr_debug("%s: cpu online:%u irq:%d\n", __func__,
2688 cpu_to_affin, mdwc->irq_to_affin);
2689 irq_set_affinity(mdwc->irq_to_affin, get_cpu_mask(cpu));
2690 }
2691
2692 return NOTIFY_OK;
2693}
2694
2695static void dwc3_otg_sm_work(struct work_struct *w);
2696
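/*
 * Acquire the GDSC regulator, the core reset and all clocks used by the
 * controller, and set their initial rates from DT properties.
 */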
2697static int dwc3_msm_get_clk_gdsc(struct dwc3_msm *mdwc)
2698{
2699 int ret;
2700
2701 mdwc->dwc3_gdsc = devm_regulator_get(mdwc->dev, "USB3_GDSC");
2702 if (IS_ERR(mdwc->dwc3_gdsc))
2703 mdwc->dwc3_gdsc = NULL;
2704
2705 mdwc->xo_clk = devm_clk_get(mdwc->dev, "xo");
2706 if (IS_ERR(mdwc->xo_clk)) {
2707 dev_err(mdwc->dev, "%s unable to get TCXO buffer handle\n",
2708 __func__);
2709 ret = PTR_ERR(mdwc->xo_clk);
2710 return ret;
2711 }
2712 clk_set_rate(mdwc->xo_clk, 19200000);
2713
2714 mdwc->iface_clk = devm_clk_get(mdwc->dev, "iface_clk");
2715 if (IS_ERR(mdwc->iface_clk)) {
2716 dev_err(mdwc->dev, "failed to get iface_clk\n");
2717 ret = PTR_ERR(mdwc->iface_clk);
2718 return ret;
2719 }
2720
2721 /*
2722 * DWC3 Core requires its CORE CLK (aka master / bus clk) to
2723 * run at 125 MHz in SSUSB mode and >60 MHz in HSUSB mode.
2724 * On newer platforms it can run at 150 MHz as well.
2725 */
2726 mdwc->core_clk = devm_clk_get(mdwc->dev, "core_clk");
2727 if (IS_ERR(mdwc->core_clk)) {
2728 dev_err(mdwc->dev, "failed to get core_clk\n");
2729 ret = PTR_ERR(mdwc->core_clk);
2730 return ret;
2731 }
2732
Amit Nischal4d278212016-06-06 17:54:34 +05302733 mdwc->core_reset = devm_reset_control_get(mdwc->dev, "core_reset");
2734 if (IS_ERR(mdwc->core_reset)) {
2735 dev_err(mdwc->dev, "failed to get core_reset\n");
2736 return PTR_ERR(mdwc->core_reset);
2737 }
2738
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05302739 if (of_property_read_u32(mdwc->dev->of_node, "qcom,core-clk-rate",
Vijayavardhan Vennapusa3e668f32016-01-08 15:58:35 +05302740 (u32 *)&mdwc->core_clk_rate)) {
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05302741 dev_err(mdwc->dev, "USB core-clk-rate is not present\n");
2742 return -EINVAL;
Vijayavardhan Vennapusa3e668f32016-01-08 15:58:35 +05302743 }
2744
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05302745 mdwc->core_clk_rate = clk_round_rate(mdwc->core_clk,
Vijayavardhan Vennapusa3e668f32016-01-08 15:58:35 +05302746 mdwc->core_clk_rate);
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05302747 dev_dbg(mdwc->dev, "USB core frequency = %ld\n",
2748 mdwc->core_clk_rate);
2749 ret = clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
2750 if (ret)
2751 dev_err(mdwc->dev, "fail to set core_clk freq:%d\n", ret);
Mayank Rana511f3b22016-08-02 12:00:11 -07002752
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08002753 if (of_property_read_u32(mdwc->dev->of_node, "qcom,core-clk-rate-hs",
2754 (u32 *)&mdwc->core_clk_rate_hs)) {
2755 dev_dbg(mdwc->dev, "USB core-clk-rate-hs is not present\n");
2756 mdwc->core_clk_rate_hs = mdwc->core_clk_rate;
2757 }
2758
Mayank Rana511f3b22016-08-02 12:00:11 -07002759 mdwc->sleep_clk = devm_clk_get(mdwc->dev, "sleep_clk");
2760 if (IS_ERR(mdwc->sleep_clk)) {
2761 dev_err(mdwc->dev, "failed to get sleep_clk\n");
2762 ret = PTR_ERR(mdwc->sleep_clk);
2763 return ret;
2764 }
2765
2766 clk_set_rate(mdwc->sleep_clk, 32000);
2767 mdwc->utmi_clk_rate = 19200000;
2768 mdwc->utmi_clk = devm_clk_get(mdwc->dev, "utmi_clk");
2769 if (IS_ERR(mdwc->utmi_clk)) {
2770 dev_err(mdwc->dev, "failed to get utmi_clk\n");
2771 ret = PTR_ERR(mdwc->utmi_clk);
2772 return ret;
2773 }
2774
2775 clk_set_rate(mdwc->utmi_clk, mdwc->utmi_clk_rate);
2776 mdwc->bus_aggr_clk = devm_clk_get(mdwc->dev, "bus_aggr_clk");
2777 if (IS_ERR(mdwc->bus_aggr_clk))
2778 mdwc->bus_aggr_clk = NULL;
2779
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05302780 mdwc->noc_aggr_clk = devm_clk_get(mdwc->dev, "noc_aggr_clk");
2781 if (IS_ERR(mdwc->noc_aggr_clk))
2782 mdwc->noc_aggr_clk = NULL;
2783
Mayank Rana511f3b22016-08-02 12:00:11 -07002784 if (of_property_match_string(mdwc->dev->of_node,
2785 "clock-names", "cfg_ahb_clk") >= 0) {
2786 mdwc->cfg_ahb_clk = devm_clk_get(mdwc->dev, "cfg_ahb_clk");
2787 if (IS_ERR(mdwc->cfg_ahb_clk)) {
2788 ret = PTR_ERR(mdwc->cfg_ahb_clk);
2789 mdwc->cfg_ahb_clk = NULL;
2790 if (ret != -EPROBE_DEFER)
2791 dev_err(mdwc->dev,
2792 "failed to get cfg_ahb_clk ret %d\n",
2793 ret);
2794 return ret;
2795 }
2796 }
2797
2798 return 0;
2799}
2800
2801static int dwc3_msm_id_notifier(struct notifier_block *nb,
2802 unsigned long event, void *ptr)
2803{
2804 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, id_nb);
Hemant Kumarde1df692016-04-26 19:36:48 -07002805 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07002806 enum dwc3_id_state id;
Mayank Rana511f3b22016-08-02 12:00:11 -07002807
2808 id = event ? DWC3_ID_GROUND : DWC3_ID_FLOAT;
2809
2810 dev_dbg(mdwc->dev, "host:%ld (id:%d) event received\n", event, id);
2811
Mayank Rana511f3b22016-08-02 12:00:11 -07002812 if (mdwc->id_state != id) {
2813 mdwc->id_state = id;
Mayank Rana08e41922017-03-02 15:25:48 -08002814 dbg_event(0xFF, "id_state", mdwc->id_state);
Mayank Rana511f3b22016-08-02 12:00:11 -07002815 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
2816 }
2817
Mayank Rana511f3b22016-08-02 12:00:11 -07002818 return NOTIFY_DONE;
2819}
2820
2821static int dwc3_msm_vbus_notifier(struct notifier_block *nb,
2822 unsigned long event, void *ptr)
2823{
2824 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, vbus_nb);
2825 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07002826
2827 dev_dbg(mdwc->dev, "vbus:%ld event received\n", event);
2828
2829 if (mdwc->vbus_active == event)
2830 return NOTIFY_DONE;
2831
Mayank Rana511f3b22016-08-02 12:00:11 -07002832 mdwc->vbus_active = event;
Mayank Rana83ad5822016-08-09 14:17:22 -07002833 if (dwc->is_drd && !mdwc->in_restart)
Mayank Rana511f3b22016-08-02 12:00:11 -07002834 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
Jack Pham4e9dff72017-04-04 18:05:53 -07002835
Mayank Rana511f3b22016-08-02 12:00:11 -07002836 return NOTIFY_DONE;
2837}
Jack Pham4e9dff72017-04-04 18:05:53 -07002838
Mayank Rana51958172017-02-28 14:49:21 -08002839/*
2840 * Handle EUD based soft detach/attach event, and force USB high speed mode
2841 * functionality on receiving soft attach event.
2842 *
2843 * @nb - notifier handler
2844 * @event - event information i.e. soft detach/attach event
2845 * @ptr - extcon_dev pointer
2846 *
2847 * @return int - NOTIFY_DONE always due to EUD
2848 */
2849static int dwc3_msm_eud_notifier(struct notifier_block *nb,
2850 unsigned long event, void *ptr)
2851{
2852 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, eud_event_nb);
2853 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana51958172017-02-28 14:49:21 -08002854
2855 dbg_event(0xFF, "EUD_NB", event);
2856 dev_dbg(mdwc->dev, "eud:%ld event received\n", event);
2857 if (mdwc->vbus_active == event)
2858 return NOTIFY_DONE;
2859
2860 /* Force USB High-Speed enumeration Only */
2861 dwc->maximum_speed = USB_SPEED_HIGH;
2862 dbg_event(0xFF, "Speed", dwc->maximum_speed);
2863 mdwc->vbus_active = event;
2864 if (dwc->is_drd && !mdwc->in_restart)
2865 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
Jack Pham4e9dff72017-04-04 18:05:53 -07002866
Mayank Rana51958172017-02-28 14:49:21 -08002867 return NOTIFY_DONE;
2868}
Mayank Rana511f3b22016-08-02 12:00:11 -07002869
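/*
 * Register for extcon notifications. Illustrative DT wiring (node names
 * are placeholders):
 *
 *	extcon = <&usb_vbus_det>, <&usb_id_det>, <&eud>;
 *
 * Phandle 0 (mandatory) reports VBUS, phandle 1 (optional) reports ID, and
 * phandle 2 (optional) reports EUD attach/detach events.
 */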
2870static int dwc3_msm_extcon_register(struct dwc3_msm *mdwc)
2871{
2872 struct device_node *node = mdwc->dev->of_node;
2873 struct extcon_dev *edev;
2874 int ret = 0;
2875
2876 if (!of_property_read_bool(node, "extcon"))
2877 return 0;
2878
Mayank Rana51958172017-02-28 14:49:21 -08002879 /* Use first phandle (mandatory) for USB vbus status notification */
Mayank Rana511f3b22016-08-02 12:00:11 -07002880 edev = extcon_get_edev_by_phandle(mdwc->dev, 0);
2881 if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV)
2882 return PTR_ERR(edev);
2883
2884 if (!IS_ERR(edev)) {
2885 mdwc->extcon_vbus = edev;
2886 mdwc->vbus_nb.notifier_call = dwc3_msm_vbus_notifier;
2887 ret = extcon_register_notifier(edev, EXTCON_USB,
2888 &mdwc->vbus_nb);
2889 if (ret < 0) {
2890 dev_err(mdwc->dev, "failed to register notifier for USB\n");
2891 return ret;
2892 }
2893 }
2894
Mayank Rana51958172017-02-28 14:49:21 -08002895 /* Use second phandle (optional) for USB ID status notification */
Mayank Rana511f3b22016-08-02 12:00:11 -07002896 if (of_count_phandle_with_args(node, "extcon", NULL) > 1) {
2897 edev = extcon_get_edev_by_phandle(mdwc->dev, 1);
2898 if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) {
2899 ret = PTR_ERR(edev);
2900 goto err;
2901 }
2902 }
2903
2904 if (!IS_ERR(edev)) {
2905 mdwc->extcon_id = edev;
2906 mdwc->id_nb.notifier_call = dwc3_msm_id_notifier;
2907 ret = extcon_register_notifier(edev, EXTCON_USB_HOST,
2908 &mdwc->id_nb);
2909 if (ret < 0) {
2910 dev_err(mdwc->dev, "failed to register notifier for USB-HOST\n");
2911 goto err;
2912 }
2913 }
2914
Mayank Rana81bd2e52017-07-26 16:15:15 -07002915 edev = NULL;
Mayank Rana51958172017-02-28 14:49:21 -08002916 /* Use third phandle (optional) for EUD based detach/attach events */
2917 if (of_count_phandle_with_args(node, "extcon", NULL) > 2) {
2918 edev = extcon_get_edev_by_phandle(mdwc->dev, 2);
2919 if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) {
2920 ret = PTR_ERR(edev);
2921 goto err;
2922 }
2923 }
2924
Mayank Rana81bd2e52017-07-26 16:15:15 -07002925 if (!IS_ERR_OR_NULL(edev)) {
Mayank Rana51958172017-02-28 14:49:21 -08002926 mdwc->extcon_eud = edev;
2927 mdwc->eud_event_nb.notifier_call = dwc3_msm_eud_notifier;
2928 ret = extcon_register_notifier(edev, EXTCON_USB,
2929 &mdwc->eud_event_nb);
2930 if (ret < 0) {
2931 dev_err(mdwc->dev, "failed to register notifier for EUD-USB\n");
2932 goto err1;
2933 }
2934 }
2935
Mayank Rana511f3b22016-08-02 12:00:11 -07002936 return 0;
Mayank Rana51958172017-02-28 14:49:21 -08002937err1:
2938 if (mdwc->extcon_id)
2939 extcon_unregister_notifier(mdwc->extcon_id, EXTCON_USB_HOST,
2940 &mdwc->id_nb);
Mayank Rana511f3b22016-08-02 12:00:11 -07002941err:
2942 if (mdwc->extcon_vbus)
2943 extcon_unregister_notifier(mdwc->extcon_vbus, EXTCON_USB,
2944 &mdwc->vbus_nb);
2945 return ret;
2946}
2947
Jack Phambbe27962017-03-23 18:42:26 -07002948#define SMMU_BASE 0x10000000 /* Device address range base */
2949#define SMMU_SIZE 0x40000000 /* Device address range size */
2950
2951static int dwc3_msm_init_iommu(struct dwc3_msm *mdwc)
2952{
2953 struct device_node *node = mdwc->dev->of_node;
Jack Pham283cece2017-04-05 09:58:17 -07002954 int atomic_ctx = 1, s1_bypass;
Jack Phambbe27962017-03-23 18:42:26 -07002955 int ret;
2956
2957 if (!of_property_read_bool(node, "iommus"))
2958 return 0;
2959
2960 mdwc->iommu_map = arm_iommu_create_mapping(&platform_bus_type,
2961 SMMU_BASE, SMMU_SIZE);
2962 if (IS_ERR_OR_NULL(mdwc->iommu_map)) {
2963 ret = PTR_ERR(mdwc->iommu_map) ?: -ENODEV;
2964 dev_err(mdwc->dev, "Failed to create IOMMU mapping (%d)\n",
2965 ret);
2966 return ret;
2967 }
2968 dev_dbg(mdwc->dev, "IOMMU mapping created: %pK\n", mdwc->iommu_map);
2969
2970 ret = iommu_domain_set_attr(mdwc->iommu_map->domain, DOMAIN_ATTR_ATOMIC,
2971 &atomic_ctx);
2972 if (ret) {
2973 dev_err(mdwc->dev, "IOMMU set atomic attribute failed (%d)\n",
2974 ret);
Jack Pham9faa51df2017-04-03 18:13:40 -07002975 goto release_mapping;
Jack Phambbe27962017-03-23 18:42:26 -07002976 }
2977
Jack Pham283cece2017-04-05 09:58:17 -07002978 s1_bypass = of_property_read_bool(node, "qcom,smmu-s1-bypass");
2979 ret = iommu_domain_set_attr(mdwc->iommu_map->domain,
2980 DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
2981 if (ret) {
2982 dev_err(mdwc->dev, "IOMMU set s1 bypass (%d) failed (%d)\n",
2983 s1_bypass, ret);
2984 goto release_mapping;
2985 }
2986
Jack Pham9faa51df2017-04-03 18:13:40 -07002987 ret = arm_iommu_attach_device(mdwc->dev, mdwc->iommu_map);
2988 if (ret) {
2989 dev_err(mdwc->dev, "IOMMU attach failed (%d)\n", ret);
2990 goto release_mapping;
2991 }
2992 dev_dbg(mdwc->dev, "attached to IOMMU\n");
2993
Jack Phambbe27962017-03-23 18:42:26 -07002994 return 0;
Jack Pham9faa51df2017-04-03 18:13:40 -07002995
2996release_mapping:
2997 arm_iommu_release_mapping(mdwc->iommu_map);
2998 mdwc->iommu_map = NULL;
2999 return ret;
Jack Phambbe27962017-03-23 18:42:26 -07003000}
3001
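/*
 * "mode" sysfs attribute: reports none/peripheral/host and lets the role
 * be forced from user space, e.g. (path is illustrative):
 *
 *	echo host > /sys/devices/platform/.../mode
 *
 * Any value other than "peripheral" or "host" falls back to none.
 */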
Mayank Rana511f3b22016-08-02 12:00:11 -07003002static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
3003 char *buf)
3004{
3005 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3006
3007 if (mdwc->vbus_active)
3008 return snprintf(buf, PAGE_SIZE, "peripheral\n");
3009 if (mdwc->id_state == DWC3_ID_GROUND)
3010 return snprintf(buf, PAGE_SIZE, "host\n");
3011
3012 return snprintf(buf, PAGE_SIZE, "none\n");
3013}
3014
3015static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
3016 const char *buf, size_t count)
3017{
3018 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3019
3020 if (sysfs_streq(buf, "peripheral")) {
3021 mdwc->vbus_active = true;
3022 mdwc->id_state = DWC3_ID_FLOAT;
3023 } else if (sysfs_streq(buf, "host")) {
3024 mdwc->vbus_active = false;
3025 mdwc->id_state = DWC3_ID_GROUND;
3026 } else {
3027 mdwc->vbus_active = false;
3028 mdwc->id_state = DWC3_ID_FLOAT;
3029 }
3030
3031 dwc3_ext_event_notify(mdwc);
3032
3033 return count;
3034}
3035
3036static DEVICE_ATTR_RW(mode);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303037static void msm_dwc3_perf_vote_work(struct work_struct *w);
Mayank Rana511f3b22016-08-02 12:00:11 -07003038
Vamsi Krishna Samavedam17f26db2017-01-31 17:21:23 -08003039static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
3040 char *buf)
3041{
3042 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3043 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3044
3045 return snprintf(buf, PAGE_SIZE, "%s\n",
3046 usb_speed_string(dwc->max_hw_supp_speed));
3047}
3048
3049static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
3050 const char *buf, size_t count)
3051{
3052 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3053 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3054 enum usb_device_speed req_speed = USB_SPEED_UNKNOWN;
3055
3056 if (sysfs_streq(buf, "high"))
3057 req_speed = USB_SPEED_HIGH;
3058 else if (sysfs_streq(buf, "super"))
3059 req_speed = USB_SPEED_SUPER;
3060
3061 if (req_speed != USB_SPEED_UNKNOWN &&
3062 req_speed != dwc->max_hw_supp_speed) {
3063 dwc->maximum_speed = dwc->max_hw_supp_speed = req_speed;
3064 schedule_work(&mdwc->restart_usb_work);
3065 }
3066
3067 return count;
3068}
3069static DEVICE_ATTR_RW(speed);
3070
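/*
 * Probe: allocate the msm glue structure, create the workqueue, acquire
 * clocks/GDSC, request the wakeup IRQs, map the TCSR/core/AHB2PHY regions,
 * and set up the optional DBM and external hub reset GPIO.
 */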
Mayank Rana511f3b22016-08-02 12:00:11 -07003071static int dwc3_msm_probe(struct platform_device *pdev)
3072{
3073 struct device_node *node = pdev->dev.of_node, *dwc3_node;
3074 struct device *dev = &pdev->dev;
Hemant Kumar8220a982017-01-19 18:11:34 -08003075 union power_supply_propval pval = {0};
Mayank Rana511f3b22016-08-02 12:00:11 -07003076 struct dwc3_msm *mdwc;
3077 struct dwc3 *dwc;
3078 struct resource *res;
3079 void __iomem *tcsr;
3080 bool host_mode;
Mayank Ranad339abe2017-05-31 09:19:49 -07003081 int ret = 0, i;
Mayank Rana511f3b22016-08-02 12:00:11 -07003082 int ext_hub_reset_gpio;
3083 u32 val;
Mayank Ranad339abe2017-05-31 09:19:49 -07003084 unsigned long irq_type;
Mayank Rana511f3b22016-08-02 12:00:11 -07003085
3086 mdwc = devm_kzalloc(&pdev->dev, sizeof(*mdwc), GFP_KERNEL);
3087 if (!mdwc)
3088 return -ENOMEM;
3089
3090 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
3091 dev_err(&pdev->dev, "setting DMA mask to 64 failed.\n");
3092 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
3093 dev_err(&pdev->dev, "setting DMA mask to 32 failed.\n");
3094 return -EOPNOTSUPP;
3095 }
3096 }
3097
3098 platform_set_drvdata(pdev, mdwc);
3099 mdwc->dev = &pdev->dev;
3100
3101 INIT_LIST_HEAD(&mdwc->req_complete_list);
3102 INIT_WORK(&mdwc->resume_work, dwc3_resume_work);
3103 INIT_WORK(&mdwc->restart_usb_work, dwc3_restart_usb_work);
Jack Pham4b8b4ae2016-08-09 11:36:34 -07003104 INIT_WORK(&mdwc->vbus_draw_work, dwc3_msm_vbus_draw_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07003105 INIT_DELAYED_WORK(&mdwc->sm_work, dwc3_otg_sm_work);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303106 INIT_DELAYED_WORK(&mdwc->perf_vote_work, msm_dwc3_perf_vote_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07003107
3108 mdwc->dwc3_wq = alloc_ordered_workqueue("dwc3_wq", 0);
3109 if (!mdwc->dwc3_wq) {
3110 pr_err("%s: Unable to create workqueue dwc3_wq\n", __func__);
3111 return -ENOMEM;
3112 }
3113
3114 /* Get all clks and gdsc reference */
3115 ret = dwc3_msm_get_clk_gdsc(mdwc);
3116 if (ret) {
3117 dev_err(&pdev->dev, "error getting clock or gdsc.\n");
3118 return ret;
3119 }
3120
3121 mdwc->id_state = DWC3_ID_FLOAT;
3122 set_bit(ID, &mdwc->inputs);
3123
3124 mdwc->charging_disabled = of_property_read_bool(node,
3125 "qcom,charging-disabled");
3126
3127 ret = of_property_read_u32(node, "qcom,lpm-to-suspend-delay-ms",
3128 &mdwc->lpm_to_suspend_delay);
3129 if (ret) {
3130 dev_dbg(&pdev->dev, "setting lpm_to_suspend_delay to zero.\n");
3131 mdwc->lpm_to_suspend_delay = 0;
3132 }
3133
Mayank Ranad339abe2017-05-31 09:19:49 -07003134 memcpy(mdwc->wakeup_irq, usb_irq_info, sizeof(usb_irq_info));
3135 for (i = 0; i < USB_MAX_IRQ; i++) {
3136 irq_type = IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME |
3137 IRQF_ONESHOT;
3138 mdwc->wakeup_irq[i].irq = platform_get_irq_byname(pdev,
3139 mdwc->wakeup_irq[i].name);
3140 if (mdwc->wakeup_irq[i].irq < 0) {
3141 /* pwr_event_irq is the only mandatory irq */
3142 if (!strcmp(mdwc->wakeup_irq[i].name,
3143 "pwr_event_irq")) {
3144 dev_err(&pdev->dev, "get_irq for %s failed\n\n",
3145 mdwc->wakeup_irq[i].name);
3146 ret = -EINVAL;
3147 goto err;
3148 }
3149 mdwc->wakeup_irq[i].irq = 0;
3150 } else {
3151 irq_set_status_flags(mdwc->wakeup_irq[i].irq,
3152 IRQ_NOAUTOEN);
3153 /* ss_phy_irq is level trigger interrupt */
3154 if (!strcmp(mdwc->wakeup_irq[i].name, "ss_phy_irq"))
3155 irq_type = IRQF_TRIGGER_HIGH | IRQF_ONESHOT |
3156 IRQ_TYPE_LEVEL_HIGH | IRQF_EARLY_RESUME;
Mayank Rana511f3b22016-08-02 12:00:11 -07003157
Mayank Ranad339abe2017-05-31 09:19:49 -07003158 ret = devm_request_threaded_irq(&pdev->dev,
3159 mdwc->wakeup_irq[i].irq,
Mayank Rana511f3b22016-08-02 12:00:11 -07003160 msm_dwc3_pwr_irq,
3161 msm_dwc3_pwr_irq_thread,
Mayank Ranad339abe2017-05-31 09:19:49 -07003162 irq_type,
3163 mdwc->wakeup_irq[i].name, mdwc);
3164 if (ret) {
3165 dev_err(&pdev->dev, "irq req %s failed: %d\n",
3166 mdwc->wakeup_irq[i].name, ret);
3167 goto err;
3168 }
Mayank Rana511f3b22016-08-02 12:00:11 -07003169 }
3170 }
3171
3172 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcsr_base");
3173 if (!res) {
3174 dev_dbg(&pdev->dev, "missing TCSR memory resource\n");
3175 } else {
3176 tcsr = devm_ioremap_nocache(&pdev->dev, res->start,
3177 resource_size(res));
3178 if (IS_ERR_OR_NULL(tcsr)) {
3179 dev_dbg(&pdev->dev, "tcsr ioremap failed\n");
3180 } else {
3181 /* Enable USB3 on the primary USB port. */
3182 writel_relaxed(0x1, tcsr);
3183 /*
3184 * Ensure that the TCSR write has completed before
3185 * USB register initialization begins.
3186 */
3187 mb();
3188 }
3189 }
3190
3191 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core_base");
3192 if (!res) {
3193 dev_err(&pdev->dev, "missing memory base resource\n");
3194 ret = -ENODEV;
3195 goto err;
3196 }
3197
3198 mdwc->base = devm_ioremap_nocache(&pdev->dev, res->start,
3199 resource_size(res));
3200 if (!mdwc->base) {
3201 dev_err(&pdev->dev, "ioremap failed\n");
3202 ret = -ENODEV;
3203 goto err;
3204 }
3205
3206 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3207 "ahb2phy_base");
3208 if (res) {
3209 mdwc->ahb2phy_base = devm_ioremap_nocache(&pdev->dev,
3210 res->start, resource_size(res));
3211 if (IS_ERR_OR_NULL(mdwc->ahb2phy_base)) {
3212 dev_err(dev, "failed to map ahb2phy_base\n");
3213 mdwc->ahb2phy_base = NULL;
3214 } else {
3215 /*
3216 * On some targets cfg_ahb_clk depends on the USB GDSC
3217 * regulator. If cfg_ahb_clk is enabled without first
3218 * turning on the USB GDSC, the clock gets stuck off.
3219 */
3220 dwc3_msm_config_gdsc(mdwc, 1);
3221 clk_prepare_enable(mdwc->cfg_ahb_clk);
3222 /* Configure AHB2PHY for one wait state read/write */
3223 val = readl_relaxed(mdwc->ahb2phy_base +
3224 PERIPH_SS_AHB2PHY_TOP_CFG);
3225 if (val != ONE_READ_WRITE_WAIT) {
3226 writel_relaxed(ONE_READ_WRITE_WAIT,
3227 mdwc->ahb2phy_base +
3228 PERIPH_SS_AHB2PHY_TOP_CFG);
3229 /* complete above write before using USB PHY */
3230 mb();
3231 }
3232 clk_disable_unprepare(mdwc->cfg_ahb_clk);
3233 dwc3_msm_config_gdsc(mdwc, 0);
3234 }
3235 }
3236
3237 if (of_get_property(pdev->dev.of_node, "qcom,usb-dbm", NULL)) {
3238 mdwc->dbm = usb_get_dbm_by_phandle(&pdev->dev, "qcom,usb-dbm");
3239 if (IS_ERR(mdwc->dbm)) {
3240 dev_err(&pdev->dev, "unable to get dbm device\n");
3241 ret = -EPROBE_DEFER;
3242 goto err;
3243 }
3244 /*
3245 * The power event IRQ is required when the DBM signals L1 exit
3246 * via an interrupt.
3247 */
3248 if (dbm_l1_lpm_interrupt(mdwc->dbm)) {
Mayank Ranad339abe2017-05-31 09:19:49 -07003249 if (!mdwc->wakeup_irq[PWR_EVNT_IRQ].irq) {
Mayank Rana511f3b22016-08-02 12:00:11 -07003250 dev_err(&pdev->dev,
3251 "need pwr_event_irq exiting L1\n");
3252 ret = -EINVAL;
3253 goto err;
3254 }
3255 }
3256 }
3257
3258 ext_hub_reset_gpio = of_get_named_gpio(node,
3259 "qcom,ext-hub-reset-gpio", 0);
3260
3261 if (gpio_is_valid(ext_hub_reset_gpio)
3262 && (!devm_gpio_request(&pdev->dev, ext_hub_reset_gpio,
3263 "qcom,ext-hub-reset-gpio"))) {
3264 /* reset external hub */
3265 gpio_direction_output(ext_hub_reset_gpio, 1);
3266 /*
3267 * Hub reset must be asserted for a minimum of 5 microseconds
3268 * before being deasserted.
3269 */
3270 usleep_range(5, 1000);
3271 gpio_direction_output(ext_hub_reset_gpio, 0);
3272 }
3273
3274 if (of_property_read_u32(node, "qcom,dwc-usb3-msm-tx-fifo-size",
3275 &mdwc->tx_fifo_size))
3276 dev_err(&pdev->dev,
3277 "unable to read platform data tx fifo size\n");
3278
3279 mdwc->disable_host_mode_pm = of_property_read_bool(node,
3280 "qcom,disable-host-mode-pm");
Mayank Ranad339abe2017-05-31 09:19:49 -07003281 mdwc->use_pdc_interrupts = of_property_read_bool(node,
3282 "qcom,use-pdc-interrupts");
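/* Hook this glue driver's event callback into the dwc3 core */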
Mayank Rana511f3b22016-08-02 12:00:11 -07003283 dwc3_set_notifier(&dwc3_msm_notify_event);
3284
Jack Phambbe27962017-03-23 18:42:26 -07003285 ret = dwc3_msm_init_iommu(mdwc);
3286 if (ret)
3287 goto err;
3288
Mayank Rana511f3b22016-08-02 12:00:11 -07003289 /* Assumes dwc3 is the first DT child of dwc3-msm */
3290 dwc3_node = of_get_next_available_child(node, NULL);
3291 if (!dwc3_node) {
3292 dev_err(&pdev->dev, "failed to find dwc3 child\n");
3293 ret = -ENODEV;
Jack Phambbe27962017-03-23 18:42:26 -07003294 goto uninit_iommu;
Mayank Rana511f3b22016-08-02 12:00:11 -07003295 }
3296
3297 ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
3298 if (ret) {
3299 dev_err(&pdev->dev,
3300 "failed to create dwc3 core\n");
3301 of_node_put(dwc3_node);
Jack Phambbe27962017-03-23 18:42:26 -07003302 goto uninit_iommu;
Mayank Rana511f3b22016-08-02 12:00:11 -07003303 }
3304
3305 mdwc->dwc3 = of_find_device_by_node(dwc3_node);
3306 of_node_put(dwc3_node);
3307 if (!mdwc->dwc3) {
3308 dev_err(&pdev->dev, "failed to get dwc3 platform device\n");
ret = -ENODEV;
3309 goto put_dwc3;
3310 }
3311
3312 mdwc->hs_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
3313 "usb-phy", 0);
3314 if (IS_ERR(mdwc->hs_phy)) {
3315 dev_err(&pdev->dev, "unable to get hsphy device\n");
3316 ret = PTR_ERR(mdwc->hs_phy);
3317 goto put_dwc3;
3318 }
3319 mdwc->ss_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
3320 "usb-phy", 1);
3321 if (IS_ERR(mdwc->ss_phy)) {
3322 dev_err(&pdev->dev, "unable to get ssphy device\n");
3323 ret = PTR_ERR(mdwc->ss_phy);
3324 goto put_dwc3;
3325 }
3326
3327 mdwc->bus_scale_table = msm_bus_cl_get_pdata(pdev);
3328 if (mdwc->bus_scale_table) {
3329 mdwc->bus_perf_client =
3330 msm_bus_scale_register_client(mdwc->bus_scale_table);
3331 }
3332
3333 dwc = platform_get_drvdata(mdwc->dwc3);
3334 if (!dwc) {
3335 dev_err(&pdev->dev, "Failed to get dwc3 device\n");
ret = -ENODEV;
3336 goto put_dwc3;
3337 }
3338
3339 mdwc->irq_to_affin = platform_get_irq(mdwc->dwc3, 0);
3340 mdwc->dwc3_cpu_notifier.notifier_call = dwc3_cpu_notifier_cb;
3341
3342 if (cpu_to_affin)
3343 register_cpu_notifier(&mdwc->dwc3_cpu_notifier);
3344
Mayank Ranaf4918d32016-12-15 13:35:55 -08003345 ret = of_property_read_u32(node, "qcom,num-gsi-evt-buffs",
3346 &mdwc->num_gsi_event_buffers);
3347
Jack Pham9faa51df2017-04-03 18:13:40 -07003348 /* IOMMU will be reattached upon each resume/connect */
3349 if (mdwc->iommu_map)
3350 arm_iommu_detach_device(mdwc->dev);
3351
Mayank Rana511f3b22016-08-02 12:00:11 -07003352 /*
3353 * Clocks and regulators will not be turned on until the first time
3354 * runtime PM resume is called. This is to allow for booting up with
3355 * charger already connected so as not to disturb PHY line states.
3356 */
3357 mdwc->lpm_flags = MDWC3_POWER_COLLAPSE | MDWC3_SS_PHY_SUSPEND;
3358 atomic_set(&dwc->in_lpm, 1);
Mayank Rana511f3b22016-08-02 12:00:11 -07003359 pm_runtime_set_autosuspend_delay(mdwc->dev, 1000);
3360 pm_runtime_use_autosuspend(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003361 device_init_wakeup(mdwc->dev, 1);
3362
3363 if (of_property_read_bool(node, "qcom,disable-dev-mode-pm"))
3364 pm_runtime_get_noresume(mdwc->dev);
3365
3366 ret = dwc3_msm_extcon_register(mdwc);
3367 if (ret)
3368 goto put_dwc3;
3369
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303370 ret = of_property_read_u32(node, "qcom,pm-qos-latency",
3371 &mdwc->pm_qos_latency);
3372 if (ret) {
3373 dev_dbg(&pdev->dev, "setting pm-qos-latency to zero.\n");
3374 mdwc->pm_qos_latency = 0;
3375 }
3376
Hemant Kumar8220a982017-01-19 18:11:34 -08003377 mdwc->usb_psy = power_supply_get_by_name("usb");
3378 if (!mdwc->usb_psy) {
3379 dev_warn(mdwc->dev, "Could not get usb power_supply\n");
3380 pval.intval = -EINVAL;
3381 } else {
3382 power_supply_get_property(mdwc->usb_psy,
3383 POWER_SUPPLY_PROP_PRESENT, &pval);
3384 }
3385
Mayank Rana511f3b22016-08-02 12:00:11 -07003386 /* Update initial VBUS/ID state from extcon */
Jack Pham4e9dff72017-04-04 18:05:53 -07003387 if (mdwc->extcon_vbus && extcon_get_state(mdwc->extcon_vbus,
Mayank Rana511f3b22016-08-02 12:00:11 -07003388 EXTCON_USB))
3389 dwc3_msm_vbus_notifier(&mdwc->vbus_nb, true, mdwc->extcon_vbus);
Jack Pham4e9dff72017-04-04 18:05:53 -07003390 else if (mdwc->extcon_id && extcon_get_state(mdwc->extcon_id,
Mayank Rana511f3b22016-08-02 12:00:11 -07003391 EXTCON_USB_HOST))
3392 dwc3_msm_id_notifier(&mdwc->id_nb, true, mdwc->extcon_id);
Hemant Kumar8220a982017-01-19 18:11:34 -08003393 else if (!pval.intval) {
3394 /* USB cable is not connected */
3395 schedule_delayed_work(&mdwc->sm_work, 0);
3396 } else {
3397 if (pval.intval > 0)
3398 dev_info(mdwc->dev, "charger detection in progress\n");
3399 }
Mayank Rana511f3b22016-08-02 12:00:11 -07003400
3401 device_create_file(&pdev->dev, &dev_attr_mode);
Vamsi Krishna Samavedam17f26db2017-01-31 17:21:23 -08003402 device_create_file(&pdev->dev, &dev_attr_speed);
Mayank Rana511f3b22016-08-02 12:00:11 -07003403
Mayank Rana511f3b22016-08-02 12:00:11 -07003404 host_mode = usb_get_dr_mode(&mdwc->dwc3->dev) == USB_DR_MODE_HOST;
3405 if (!dwc->is_drd && host_mode) {
3406 dev_dbg(&pdev->dev, "DWC3 in host only mode\n");
3407 mdwc->id_state = DWC3_ID_GROUND;
3408 dwc3_ext_event_notify(mdwc);
3409 }
3410
3411 return 0;
3412
3413put_dwc3:
3414 platform_device_put(mdwc->dwc3);
3415 if (mdwc->bus_perf_client)
3416 msm_bus_scale_unregister_client(mdwc->bus_perf_client);
Jack Phambbe27962017-03-23 18:42:26 -07003417uninit_iommu:
Jack Pham9faa51df2017-04-03 18:13:40 -07003418 if (mdwc->iommu_map) {
3419 arm_iommu_detach_device(mdwc->dev);
Jack Phambbe27962017-03-23 18:42:26 -07003420 arm_iommu_release_mapping(mdwc->iommu_map);
Jack Pham9faa51df2017-04-03 18:13:40 -07003421 }
Mayank Rana511f3b22016-08-02 12:00:11 -07003422err:
3423 return ret;
3424}
3425
3426static int dwc3_msm_remove_children(struct device *dev, void *data)
3427{
3428 device_unregister(dev);
3429 return 0;
3430}
3431
3432static int dwc3_msm_remove(struct platform_device *pdev)
3433{
3434 struct dwc3_msm *mdwc = platform_get_drvdata(pdev);
Mayank Rana08e41922017-03-02 15:25:48 -08003435 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07003436 int ret_pm;
3437
3438 device_remove_file(&pdev->dev, &dev_attr_mode);
device_remove_file(&pdev->dev, &dev_attr_speed);
3439
3440 if (cpu_to_affin)
3441 unregister_cpu_notifier(&mdwc->dwc3_cpu_notifier);
3442
3443 /*
3444 * If the device is system-suspended, pm_runtime_get_sync() fails;
3445 * in that case turn the clocks on manually.
3446 */
3447 ret_pm = pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003448 dbg_event(0xFF, "Remov gsyn", ret_pm);
Mayank Rana511f3b22016-08-02 12:00:11 -07003449 if (ret_pm < 0) {
3450 dev_err(mdwc->dev,
3451 "pm_runtime_get_sync failed with %d\n", ret_pm);
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05303452 if (mdwc->noc_aggr_clk)
3453 clk_prepare_enable(mdwc->noc_aggr_clk);
Mayank Rana511f3b22016-08-02 12:00:11 -07003454 clk_prepare_enable(mdwc->utmi_clk);
3455 clk_prepare_enable(mdwc->core_clk);
3456 clk_prepare_enable(mdwc->iface_clk);
3457 clk_prepare_enable(mdwc->sleep_clk);
3458 if (mdwc->bus_aggr_clk)
3459 clk_prepare_enable(mdwc->bus_aggr_clk);
3460 clk_prepare_enable(mdwc->xo_clk);
3461 }
3462
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303463 cancel_delayed_work_sync(&mdwc->perf_vote_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07003464 cancel_delayed_work_sync(&mdwc->sm_work);
3465
3466 if (mdwc->hs_phy)
3467 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3468 platform_device_put(mdwc->dwc3);
3469 device_for_each_child(&pdev->dev, NULL, dwc3_msm_remove_children);
3470
Mayank Rana08e41922017-03-02 15:25:48 -08003471 dbg_event(0xFF, "Remov put", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003472 pm_runtime_disable(mdwc->dev);
3473 pm_runtime_barrier(mdwc->dev);
3474 pm_runtime_put_sync(mdwc->dev);
3475 pm_runtime_set_suspended(mdwc->dev);
3476 device_wakeup_disable(mdwc->dev);
3477
3478 if (mdwc->bus_perf_client)
3479 msm_bus_scale_unregister_client(mdwc->bus_perf_client);
3480
3481 if (!IS_ERR_OR_NULL(mdwc->vbus_reg))
3482 regulator_disable(mdwc->vbus_reg);
3483
Mayank Ranad339abe2017-05-31 09:19:49 -07003484 if (mdwc->wakeup_irq[HS_PHY_IRQ].irq)
3485 disable_irq(mdwc->wakeup_irq[HS_PHY_IRQ].irq);
3486 if (mdwc->wakeup_irq[DP_HS_PHY_IRQ].irq)
3487 disable_irq(mdwc->wakeup_irq[DP_HS_PHY_IRQ].irq);
3488 if (mdwc->wakeup_irq[DM_HS_PHY_IRQ].irq)
3489 disable_irq(mdwc->wakeup_irq[DM_HS_PHY_IRQ].irq);
3490 if (mdwc->wakeup_irq[SS_PHY_IRQ].irq)
3491 disable_irq(mdwc->wakeup_irq[SS_PHY_IRQ].irq);
3492 disable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
Mayank Rana511f3b22016-08-02 12:00:11 -07003493
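/* Gate all clocks, release the XO clock handle and turn off the GDSC */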
3494 clk_disable_unprepare(mdwc->utmi_clk);
3495 clk_set_rate(mdwc->core_clk, 19200000);
3496 clk_disable_unprepare(mdwc->core_clk);
3497 clk_disable_unprepare(mdwc->iface_clk);
3498 clk_disable_unprepare(mdwc->sleep_clk);
3499 clk_disable_unprepare(mdwc->xo_clk);
3500 clk_put(mdwc->xo_clk);
3501
3502 dwc3_msm_config_gdsc(mdwc, 0);
3503
Jack Phambbe27962017-03-23 18:42:26 -07003504 if (mdwc->iommu_map) {
3505 if (!atomic_read(&dwc->in_lpm))
3506 arm_iommu_detach_device(mdwc->dev);
3507 arm_iommu_release_mapping(mdwc->iommu_map);
3508 }
3509
Mayank Rana511f3b22016-08-02 12:00:11 -07003510 return 0;
3511}
3512
Jack Pham4d4e9342016-12-07 19:25:02 -08003513static int dwc3_msm_host_notifier(struct notifier_block *nb,
3514 unsigned long event, void *ptr)
3515{
3516 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, host_nb);
3517 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3518 struct usb_device *udev = ptr;
3519 union power_supply_propval pval;
3520 unsigned int max_power;
3521
3522 if (event != USB_DEVICE_ADD && event != USB_DEVICE_REMOVE)
3523 return NOTIFY_DONE;
3524
3525 if (!mdwc->usb_psy) {
3526 mdwc->usb_psy = power_supply_get_by_name("usb");
3527 if (!mdwc->usb_psy)
3528 return NOTIFY_DONE;
3529 }
3530
3531 /*
3532 * For directly attached devices, the new udev is a direct child of the
3533 * root hub, i.e. dwc -> xhci -> root_hub -> udev. Since the root hub's
3534 * udev->parent == NULL, walk the struct device hierarchy instead.
3535 */
3536 if (udev->parent && !udev->parent->parent &&
3537 udev->dev.parent->parent == &dwc->xhci->dev) {
3538 if (event == USB_DEVICE_ADD && udev->actconfig) {
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08003539 if (!dwc3_msm_is_ss_rhport_connected(mdwc)) {
3540 /*
3541 * Core clock rate can be reduced only if root
3542 * hub SS port is not enabled/connected.
3543 */
3544 clk_set_rate(mdwc->core_clk,
3545 mdwc->core_clk_rate_hs);
3546 dev_dbg(mdwc->dev,
3547 "set hs core clk rate %ld\n",
3548 mdwc->core_clk_rate_hs);
3549 mdwc->max_rh_port_speed = USB_SPEED_HIGH;
3550 } else {
3551 mdwc->max_rh_port_speed = USB_SPEED_SUPER;
3552 }
3553
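/*
 * bMaxPower is in units of 8 mA for SuperSpeed configurations and
 * 2 mA otherwise, per the USB configuration descriptor definition.
 */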
Jack Pham4d4e9342016-12-07 19:25:02 -08003554 if (udev->speed >= USB_SPEED_SUPER)
3555 max_power = udev->actconfig->desc.bMaxPower * 8;
3556 else
3557 max_power = udev->actconfig->desc.bMaxPower * 2;
3558 dev_dbg(mdwc->dev, "%s configured bMaxPower:%d (mA)\n",
3559 dev_name(&udev->dev), max_power);
3560
3561 /* inform PMIC of max power so it can optimize boost */
3562 pval.intval = max_power * 1000;
3563 power_supply_set_property(mdwc->usb_psy,
3564 POWER_SUPPLY_PROP_BOOST_CURRENT, &pval);
3565 } else {
3566 pval.intval = 0;
3567 power_supply_set_property(mdwc->usb_psy,
3568 POWER_SUPPLY_PROP_BOOST_CURRENT, &pval);
Hemant Kumar6f504dc2017-02-07 14:13:58 -08003569
3570 /* set rate back to default core clk rate */
3571 clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
3572 dev_dbg(mdwc->dev, "set core clk rate %ld\n",
3573 mdwc->core_clk_rate);
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08003574 mdwc->max_rh_port_speed = USB_SPEED_UNKNOWN;
Jack Pham4d4e9342016-12-07 19:25:02 -08003575 }
3576 }
3577
3578 return NOTIFY_DONE;
3579}
3580
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303581static void msm_dwc3_perf_vote_update(struct dwc3_msm *mdwc, bool perf_mode)
3582{
3583 static bool curr_perf_mode;
3584 int latency = mdwc->pm_qos_latency;
3585
3586 if ((curr_perf_mode == perf_mode) || !latency)
3587 return;
3588
3589 if (perf_mode)
3590 pm_qos_update_request(&mdwc->pm_qos_req_dma, latency);
3591 else
3592 pm_qos_update_request(&mdwc->pm_qos_req_dma,
3593 PM_QOS_DEFAULT_VALUE);
3594
3595 curr_perf_mode = perf_mode;
3596 pr_debug("%s: latency updated to: %d\n", __func__,
3597 perf_mode ? latency : PM_QOS_DEFAULT_VALUE);
3598}
3599
3600static void msm_dwc3_perf_vote_work(struct work_struct *w)
3601{
3602 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
3603 perf_vote_work.work);
3604 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3605 static unsigned long last_irq_cnt;
3606 bool in_perf_mode = false;
3607
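/*
 * Vote for performance (low PM QoS latency) if the controller raised
 * at least PM_QOS_THRESHOLD interrupts since the previous sample.
 */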
3608 if (dwc->irq_cnt - last_irq_cnt >= PM_QOS_THRESHOLD)
3609 in_perf_mode = true;
3610
3611 pr_debug("%s: in_perf_mode:%u, interrupts in last sample:%lu\n",
3612 __func__, in_perf_mode, (dwc->irq_cnt - last_irq_cnt));
3613
3614 last_irq_cnt = dwc->irq_cnt;
3615 msm_dwc3_perf_vote_update(mdwc, in_perf_mode);
3616 schedule_delayed_work(&mdwc->perf_vote_work,
3617 msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
3618}
3619
Mayank Rana511f3b22016-08-02 12:00:11 -07003620#define VBUS_REG_CHECK_DELAY (msecs_to_jiffies(1000))
3621
3622/**
3623 * dwc3_otg_start_host - helper function for starting/stopping the host
3624 * controller driver.
3625 *
3626 * @mdwc: Pointer to the dwc3_msm structure.
3627 * @on: start / stop the host controller driver.
3628 *
3629 * Returns 0 on success, otherwise a negative errno.
3630 */
3631static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
3632{
3633 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3634 int ret = 0;
3635
3636 if (!dwc->xhci)
3637 return -EINVAL;
3638
3639 /*
3640 * The vbus_reg pointer could have multiple values
3641 * NULL: regulator_get() hasn't been called, or was previously deferred
3642 * IS_ERR: regulator could not be obtained, so skip using it
3643 * Valid pointer otherwise
3644 */
3645 if (!mdwc->vbus_reg) {
3646 mdwc->vbus_reg = devm_regulator_get_optional(mdwc->dev,
3647 "vbus_dwc3");
3648 if (IS_ERR(mdwc->vbus_reg) &&
3649 PTR_ERR(mdwc->vbus_reg) == -EPROBE_DEFER) {
3650 /* regulators may not be ready, so retry later */
3651 mdwc->vbus_reg = NULL;
3652 return -EPROBE_DEFER;
3653 }
3654 }
3655
3656 if (on) {
3657 dev_dbg(mdwc->dev, "%s: turn on host\n", __func__);
3658
Mayank Rana511f3b22016-08-02 12:00:11 -07003659 mdwc->hs_phy->flags |= PHY_HOST_MODE;
Mayank Rana0d5efd72017-06-08 10:06:00 -07003660 if (dwc->maximum_speed == USB_SPEED_SUPER) {
Hemant Kumarde1df692016-04-26 19:36:48 -07003661 mdwc->ss_phy->flags |= PHY_HOST_MODE;
Mayank Rana0d5efd72017-06-08 10:06:00 -07003662 usb_phy_notify_connect(mdwc->ss_phy,
3663 USB_SPEED_SUPER);
3664 }
Hemant Kumarde1df692016-04-26 19:36:48 -07003665
Mayank Rana0d5efd72017-06-08 10:06:00 -07003666 usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
Hemant Kumarde1df692016-04-26 19:36:48 -07003667 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003668 dbg_event(0xFF, "StrtHost gync",
3669 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003670 if (!IS_ERR(mdwc->vbus_reg))
3671 ret = regulator_enable(mdwc->vbus_reg);
3672 if (ret) {
3673 dev_err(mdwc->dev, "unable to enable vbus_reg\n");
3674 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3675 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
3676 pm_runtime_put_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003677 dbg_event(0xFF, "vregerr psync",
3678 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003679 return ret;
3680 }
3681
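/* Switch the core to host mode before adding the xHCI platform device */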
3682 dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
3683
Jack Pham4d4e9342016-12-07 19:25:02 -08003684 mdwc->host_nb.notifier_call = dwc3_msm_host_notifier;
3685 usb_register_notify(&mdwc->host_nb);
3686
Manu Gautam976fdfc2016-08-18 09:27:35 +05303687 mdwc->usbdev_nb.notifier_call = msm_dwc3_usbdev_notify;
3688 usb_register_atomic_notify(&mdwc->usbdev_nb);
Mayank Rana511f3b22016-08-02 12:00:11 -07003689 /*
3690 * FIXME: If the micro-A cable is disconnected during system suspend,
3691 * the xhci platform device is removed before runtime PM gets enabled
3692 * for it. disable_depth then becomes greater than one and runtime PM
3693 * is never enabled on the next micro-A connect. Work around this by
3694 * calling pm_runtime_init() for the xhci device.
3695 */
3696 pm_runtime_init(&dwc->xhci->dev);
3697 ret = platform_device_add(dwc->xhci);
3698 if (ret) {
3699 dev_err(mdwc->dev,
3700 "%s: failed to add XHCI pdev ret=%d\n",
3701 __func__, ret);
3702 if (!IS_ERR(mdwc->vbus_reg))
3703 regulator_disable(mdwc->vbus_reg);
3704 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3705 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
3706 pm_runtime_put_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003707 dbg_event(0xFF, "pdeverr psync",
3708 atomic_read(&mdwc->dev->power.usage_count));
Jack Pham4d4e9342016-12-07 19:25:02 -08003709 usb_unregister_notify(&mdwc->host_nb);
Mayank Rana511f3b22016-08-02 12:00:11 -07003710 return ret;
3711 }
3712
3713 /*
3714 * In some cases the USB PHY has been observed not to enter suspend
3715 * while host mode suspend is in use. Hence disable XHCI's runtime PM
3716 * here if disable_host_mode_pm is set.
3717 */
3718 if (mdwc->disable_host_mode_pm)
3719 pm_runtime_disable(&dwc->xhci->dev);
3720
3721 mdwc->in_host_mode = true;
3722 dwc3_usb3_phy_suspend(dwc, true);
3723
3724 /* xHCI should have incremented child count as necessary */
Mayank Rana08e41922017-03-02 15:25:48 -08003725 dbg_event(0xFF, "StrtHost psync",
3726 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003727 pm_runtime_mark_last_busy(mdwc->dev);
3728 pm_runtime_put_sync_autosuspend(mdwc->dev);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303729#ifdef CONFIG_SMP
3730 mdwc->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
3731 mdwc->pm_qos_req_dma.irq = dwc->irq;
3732#endif
3733 pm_qos_add_request(&mdwc->pm_qos_req_dma,
3734 PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
3735 /* start in perf mode for better performance initially */
3736 msm_dwc3_perf_vote_update(mdwc, true);
3737 schedule_delayed_work(&mdwc->perf_vote_work,
3738 msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
Mayank Rana511f3b22016-08-02 12:00:11 -07003739 } else {
3740 dev_dbg(mdwc->dev, "%s: turn off host\n", __func__);
3741
Manu Gautam976fdfc2016-08-18 09:27:35 +05303742 usb_unregister_atomic_notify(&mdwc->usbdev_nb);
Mayank Rana511f3b22016-08-02 12:00:11 -07003743 if (!IS_ERR(mdwc->vbus_reg))
3744 ret = regulator_disable(mdwc->vbus_reg);
3745 if (ret) {
3746 dev_err(mdwc->dev, "unable to disable vbus_reg\n");
3747 return ret;
3748 }
3749
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303750 cancel_delayed_work_sync(&mdwc->perf_vote_work);
3751 msm_dwc3_perf_vote_update(mdwc, false);
3752 pm_qos_remove_request(&mdwc->pm_qos_req_dma);
3753
Mayank Rana511f3b22016-08-02 12:00:11 -07003754 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003755 dbg_event(0xFF, "StopHost gsync",
3756 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003757 usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
Mayank Rana0d5efd72017-06-08 10:06:00 -07003758 if (mdwc->ss_phy->flags & PHY_HOST_MODE) {
3759 usb_phy_notify_disconnect(mdwc->ss_phy,
3760 USB_SPEED_SUPER);
3761 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
3762 }
3763
Mayank Rana511f3b22016-08-02 12:00:11 -07003764 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
Mayank Rana511f3b22016-08-02 12:00:11 -07003765 platform_device_del(dwc->xhci);
Jack Pham4d4e9342016-12-07 19:25:02 -08003766 usb_unregister_notify(&mdwc->host_nb);
Mayank Rana511f3b22016-08-02 12:00:11 -07003767
Mayank Rana511f3b22016-08-02 12:00:11 -07003768 dwc3_usb3_phy_suspend(dwc, false);
Mayank Rana511f3b22016-08-02 12:00:11 -07003769 mdwc->in_host_mode = false;
3770
Mayank Rana511f3b22016-08-02 12:00:11 -07003771 pm_runtime_mark_last_busy(mdwc->dev);
3772 pm_runtime_put_sync_autosuspend(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003773 dbg_event(0xFF, "StopHost psync",
3774 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003775 }
3776
3777 return 0;
3778}
3779
3780static void dwc3_override_vbus_status(struct dwc3_msm *mdwc, bool vbus_present)
3781{
3782 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3783
3784 /* Update OTG VBUS Valid from HSPHY to controller */
3785 dwc3_msm_write_readback(mdwc->base, HS_PHY_CTRL_REG,
3786 vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL :
3787 UTMI_OTG_VBUS_VALID,
3788 vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL : 0);
3789
3790 /* Update only if Super Speed is supported */
3791 if (dwc->maximum_speed == USB_SPEED_SUPER) {
3792 /* Update VBUS Valid from SSPHY to controller */
3793 dwc3_msm_write_readback(mdwc->base, SS_PHY_CTRL_REG,
3794 LANE0_PWR_PRESENT,
3795 vbus_present ? LANE0_PWR_PRESENT : 0);
3796 }
3797}
3798
3799/**
3800 * dwc3_otg_start_peripheral - bind/unbind the peripheral controller.
3801 *
3802 * @mdwc: Pointer to the dwc3_msm structure.
3803 * @on: Turn ON/OFF the gadget.
3804 *
3805 * Returns 0 on success, otherwise a negative errno.
3806 */
3807static int dwc3_otg_start_peripheral(struct dwc3_msm *mdwc, int on)
3808{
3809 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3810
3811 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003812 dbg_event(0xFF, "StrtGdgt gsync",
3813 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003814
3815 if (on) {
3816 dev_dbg(mdwc->dev, "%s: turn on gadget %s\n",
3817 __func__, dwc->gadget.name);
3818
3819 dwc3_override_vbus_status(mdwc, true);
3820 usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
3821 usb_phy_notify_connect(mdwc->ss_phy, USB_SPEED_SUPER);
3822
3823 /*
3824 * Core reset is not required during start peripheral. Only
3825 * DBM reset is required, hence perform only DBM reset here.
3826 */
3827 dwc3_msm_block_reset(mdwc, false);
3828
3829 dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
3830 usb_gadget_vbus_connect(&dwc->gadget);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303831#ifdef CONFIG_SMP
3832 mdwc->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
3833 mdwc->pm_qos_req_dma.irq = dwc->irq;
3834#endif
3835 pm_qos_add_request(&mdwc->pm_qos_req_dma,
3836 PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
3837 /* start in perf mode for better performance initially */
3838 msm_dwc3_perf_vote_update(mdwc, true);
3839 schedule_delayed_work(&mdwc->perf_vote_work,
3840 msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
Mayank Rana511f3b22016-08-02 12:00:11 -07003841 } else {
3842 dev_dbg(mdwc->dev, "%s: turn off gadget %s\n",
3843 __func__, dwc->gadget.name);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303844 cancel_delayed_work_sync(&mdwc->perf_vote_work);
3845 msm_dwc3_perf_vote_update(mdwc, false);
3846 pm_qos_remove_request(&mdwc->pm_qos_req_dma);
3847
Mayank Rana511f3b22016-08-02 12:00:11 -07003848 usb_gadget_vbus_disconnect(&dwc->gadget);
3849 usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
3850 usb_phy_notify_disconnect(mdwc->ss_phy, USB_SPEED_SUPER);
3851 dwc3_override_vbus_status(mdwc, false);
3852 dwc3_usb3_phy_suspend(dwc, false);
3853 }
3854
3855 pm_runtime_put_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003856 dbg_event(0xFF, "StopGdgt psync",
3857 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003858
3859 return 0;
3860}
3861
3862static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA)
3863{
Jack Pham8caff352016-08-19 16:33:55 -07003864 union power_supply_propval pval = {0};
Jack Phamd72bafe2016-08-09 11:07:22 -07003865 int ret;
Mayank Rana511f3b22016-08-02 12:00:11 -07003866
3867 if (mdwc->charging_disabled)
3868 return 0;
3869
3870 if (mdwc->max_power == mA)
3871 return 0;
3872
3873 if (!mdwc->usb_psy) {
3874 mdwc->usb_psy = power_supply_get_by_name("usb");
3875 if (!mdwc->usb_psy) {
3876 dev_warn(mdwc->dev, "Could not get usb power_supply\n");
3877 return -ENODEV;
3878 }
3879 }
3880
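/*
 * Only a standard downstream port (SDP) draws current based on USB
 * enumeration; skip the current vote for any other charger type.
 */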
Fenglin Wu80826e02017-04-25 21:45:08 +08003881 power_supply_get_property(mdwc->usb_psy,
3882 POWER_SUPPLY_PROP_REAL_TYPE, &pval);
Jack Pham8caff352016-08-19 16:33:55 -07003883 if (pval.intval != POWER_SUPPLY_TYPE_USB)
3884 return 0;
3885
Mayank Rana511f3b22016-08-02 12:00:11 -07003886 dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA);
3887
Mayank Rana511f3b22016-08-02 12:00:11 -07003888 /* Set max current limit in uA */
Jack Pham8caff352016-08-19 16:33:55 -07003889 pval.intval = 1000 * mA;
Jack Phamd72bafe2016-08-09 11:07:22 -07003890 ret = power_supply_set_property(mdwc->usb_psy,
3891 POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
3892 if (ret) {
3893 dev_dbg(mdwc->dev, "power supply error when setting property\n");
3894 return ret;
3895 }
Mayank Rana511f3b22016-08-02 12:00:11 -07003896
3897 mdwc->max_power = mA;
3898 return 0;
Mayank Rana511f3b22016-08-02 12:00:11 -07003899}
3900
3901
3902/**
3903 * dwc3_otg_sm_work - workqueue function.
3904 *
3905 * @w: Pointer to the dwc3 otg workqueue
3906 *
3907 * NOTE: After any change in otg_state, we must reschedule the state machine.
3908 */
3909static void dwc3_otg_sm_work(struct work_struct *w)
3910{
3911 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, sm_work.work);
3912 struct dwc3 *dwc = NULL;
3913 bool work = 0;
3914 int ret = 0;
3915 unsigned long delay = 0;
3916 const char *state;
3917
3918 if (mdwc->dwc3)
3919 dwc = platform_get_drvdata(mdwc->dwc3);
3920
3921 if (!dwc) {
3922 dev_err(mdwc->dev, "dwc is NULL.\n");
3923 return;
3924 }
3925
3926 state = usb_otg_state_string(mdwc->otg_state);
3927 dev_dbg(mdwc->dev, "%s state\n", state);
Mayank Rana08e41922017-03-02 15:25:48 -08003928 dbg_event(0xFF, state, 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003929
3930 /* Check OTG state */
3931 switch (mdwc->otg_state) {
3932 case OTG_STATE_UNDEFINED:
Hemant Kumar8220a982017-01-19 18:11:34 -08003933 /* put controller and phy in suspend if no cable connected */
Mayank Rana511f3b22016-08-02 12:00:11 -07003934 if (test_bit(ID, &mdwc->inputs) &&
Hemant Kumar8220a982017-01-19 18:11:34 -08003935 !test_bit(B_SESS_VLD, &mdwc->inputs)) {
3936 dbg_event(0xFF, "undef_id_!bsv", 0);
3937 pm_runtime_set_active(mdwc->dev);
3938 pm_runtime_enable(mdwc->dev);
3939 pm_runtime_get_noresume(mdwc->dev);
3940 dwc3_msm_resume(mdwc);
3941 pm_runtime_put_sync(mdwc->dev);
3942 dbg_event(0xFF, "Undef NoUSB",
3943 atomic_read(&mdwc->dev->power.usage_count));
3944 mdwc->otg_state = OTG_STATE_B_IDLE;
Mayank Rana511f3b22016-08-02 12:00:11 -07003945 break;
Hemant Kumar8220a982017-01-19 18:11:34 -08003946 }
Mayank Rana511f3b22016-08-02 12:00:11 -07003947
Mayank Rana08e41922017-03-02 15:25:48 -08003948 dbg_event(0xFF, "Exit UNDEF", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003949 mdwc->otg_state = OTG_STATE_B_IDLE;
Hemant Kumar8220a982017-01-19 18:11:34 -08003950 pm_runtime_set_suspended(mdwc->dev);
3951 pm_runtime_enable(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003952 /* fall-through */
3953 case OTG_STATE_B_IDLE:
3954 if (!test_bit(ID, &mdwc->inputs)) {
3955 dev_dbg(mdwc->dev, "!id\n");
3956 mdwc->otg_state = OTG_STATE_A_IDLE;
3957 work = 1;
3958 } else if (test_bit(B_SESS_VLD, &mdwc->inputs)) {
3959 dev_dbg(mdwc->dev, "b_sess_vld\n");
3960 /*
3961 * Increment pm usage count upon cable connect. Count
3962 * is decremented in OTG_STATE_B_PERIPHERAL state on
3963 * cable disconnect or in bus suspend.
3964 */
3965 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003966 dbg_event(0xFF, "BIDLE gsync",
3967 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003968 dwc3_otg_start_peripheral(mdwc, 1);
3969 mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
3970 work = 1;
3971 } else {
3972 dwc3_msm_gadget_vbus_draw(mdwc, 0);
3973 dev_dbg(mdwc->dev, "Cable disconnected\n");
3974 }
3975 break;
3976
3977 case OTG_STATE_B_PERIPHERAL:
3978 if (!test_bit(B_SESS_VLD, &mdwc->inputs) ||
3979 !test_bit(ID, &mdwc->inputs)) {
3980 dev_dbg(mdwc->dev, "!id || !bsv\n");
3981 mdwc->otg_state = OTG_STATE_B_IDLE;
3982 dwc3_otg_start_peripheral(mdwc, 0);
3983 /*
3984 * Decrement pm usage count upon cable disconnect
3985 * which was incremented upon cable connect in
3986 * OTG_STATE_B_IDLE state
3987 */
3988 pm_runtime_put_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003989 dbg_event(0xFF, "!BSV psync",
3990 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003991 work = 1;
3992 } else if (test_bit(B_SUSPEND, &mdwc->inputs) &&
3993 test_bit(B_SESS_VLD, &mdwc->inputs)) {
3994 dev_dbg(mdwc->dev, "BPER bsv && susp\n");
3995 mdwc->otg_state = OTG_STATE_B_SUSPEND;
3996 /*
3997 * Decrement pm usage count upon bus suspend.
3998 * Count was incremented either upon cable
3999 * connect in OTG_STATE_B_IDLE or host
4000 * initiated resume after bus suspend in
4001 * OTG_STATE_B_SUSPEND state
4002 */
4003 pm_runtime_mark_last_busy(mdwc->dev);
4004 pm_runtime_put_autosuspend(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004005 dbg_event(0xFF, "SUSP put",
4006 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07004007 }
4008 break;
4009
4010 case OTG_STATE_B_SUSPEND:
4011 if (!test_bit(B_SESS_VLD, &mdwc->inputs)) {
4012 dev_dbg(mdwc->dev, "BSUSP: !bsv\n");
4013 mdwc->otg_state = OTG_STATE_B_IDLE;
4014 dwc3_otg_start_peripheral(mdwc, 0);
4015 } else if (!test_bit(B_SUSPEND, &mdwc->inputs)) {
4016 dev_dbg(mdwc->dev, "BSUSP !susp\n");
4017 mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
4018 /*
4019 * Increment pm usage count upon host
4020 * initiated resume. Count was decremented
4021 * upon bus suspend in
4022 * OTG_STATE_B_PERIPHERAL state.
4023 */
4024 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004025 dbg_event(0xFF, "!SUSP gsync",
4026 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07004027 }
4028 break;
4029
4030 case OTG_STATE_A_IDLE:
4031 /* Switch to A-Device */
4032 if (test_bit(ID, &mdwc->inputs)) {
4033 dev_dbg(mdwc->dev, "id\n");
4034 mdwc->otg_state = OTG_STATE_B_IDLE;
4035 mdwc->vbus_retry_count = 0;
4036 work = 1;
4037 } else {
4038 mdwc->otg_state = OTG_STATE_A_HOST;
4039 ret = dwc3_otg_start_host(mdwc, 1);
4040 if ((ret == -EPROBE_DEFER) &&
4041 mdwc->vbus_retry_count < 3) {
4042 /*
4043 * Getting the regulator failed because the regulator driver
4044 * is not up yet; retry starting the host after 1 second.
4045 */
4046 mdwc->otg_state = OTG_STATE_A_IDLE;
4047 dev_dbg(mdwc->dev, "Unable to get vbus regulator. Retrying...\n");
4048 delay = VBUS_REG_CHECK_DELAY;
4049 work = 1;
4050 mdwc->vbus_retry_count++;
4051 } else if (ret) {
4052 dev_err(mdwc->dev, "unable to start host\n");
4053 mdwc->otg_state = OTG_STATE_A_IDLE;
4054 goto ret;
4055 }
4056 }
4057 break;
4058
4059 case OTG_STATE_A_HOST:
Manu Gautam976fdfc2016-08-18 09:27:35 +05304060 if (test_bit(ID, &mdwc->inputs) || mdwc->hc_died) {
4061 dev_dbg(mdwc->dev, "id || hc_died\n");
Mayank Rana511f3b22016-08-02 12:00:11 -07004062 dwc3_otg_start_host(mdwc, 0);
4063 mdwc->otg_state = OTG_STATE_B_IDLE;
4064 mdwc->vbus_retry_count = 0;
Manu Gautam976fdfc2016-08-18 09:27:35 +05304065 mdwc->hc_died = false;
Mayank Rana511f3b22016-08-02 12:00:11 -07004066 work = 1;
4067 } else {
4068 dev_dbg(mdwc->dev, "still in a_host state. Resuming root hub.\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004069 dbg_event(0xFF, "XHCIResume", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004070 if (dwc)
4071 pm_runtime_resume(&dwc->xhci->dev);
4072 }
4073 break;
4074
4075 default:
4076 dev_err(mdwc->dev, "%s: invalid otg-state\n", __func__);
4077
4078 }
4079
4080 if (work)
4081 schedule_delayed_work(&mdwc->sm_work, delay);
4082
4083ret:
4084 return;
4085}
4086
4087#ifdef CONFIG_PM_SLEEP
4088static int dwc3_msm_pm_suspend(struct device *dev)
4089{
4090 int ret = 0;
4091 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
4092 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
4093
4094 dev_dbg(dev, "dwc3-msm PM suspend\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004095 dbg_event(0xFF, "PM Sus", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004096
4097 flush_workqueue(mdwc->dwc3_wq);
4098 if (!atomic_read(&dwc->in_lpm)) {
4099 dev_err(mdwc->dev, "Abort PM suspend!! (USB is outside LPM)\n");
4100 return -EBUSY;
4101 }
4102
4103 ret = dwc3_msm_suspend(mdwc);
4104 if (!ret)
4105 atomic_set(&mdwc->pm_suspended, 1);
4106
4107 return ret;
4108}
4109
4110static int dwc3_msm_pm_resume(struct device *dev)
4111{
4112 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004113 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07004114
4115 dev_dbg(dev, "dwc3-msm PM resume\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004116 dbg_event(0xFF, "PM Res", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004117
Mayank Rana511f3b22016-08-02 12:00:11 -07004118 /* flush to avoid race in read/write of pm_suspended */
4119 flush_workqueue(mdwc->dwc3_wq);
4120 atomic_set(&mdwc->pm_suspended, 0);
4121
4122 /* kick in otg state machine */
4123 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
4124
4125 return 0;
4126}
4127#endif
4128
4129#ifdef CONFIG_PM
4130static int dwc3_msm_runtime_idle(struct device *dev)
4131{
Mayank Rana08e41922017-03-02 15:25:48 -08004132 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
4133 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
4134
Mayank Rana511f3b22016-08-02 12:00:11 -07004135 dev_dbg(dev, "DWC3-msm runtime idle\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004136 dbg_event(0xFF, "RT Idle", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004137
4138 return 0;
4139}
4140
4141static int dwc3_msm_runtime_suspend(struct device *dev)
4142{
4143 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004144 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07004145
4146 dev_dbg(dev, "DWC3-msm runtime suspend\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004147 dbg_event(0xFF, "RT Sus", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004148
4149 return dwc3_msm_suspend(mdwc);
4150}
4151
4152static int dwc3_msm_runtime_resume(struct device *dev)
4153{
4154 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004155 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07004156
4157 dev_dbg(dev, "DWC3-msm runtime resume\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004158 dbg_event(0xFF, "RT Res", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004159
4160 return dwc3_msm_resume(mdwc);
4161}
4162#endif
4163
4164static const struct dev_pm_ops dwc3_msm_dev_pm_ops = {
4165 SET_SYSTEM_SLEEP_PM_OPS(dwc3_msm_pm_suspend, dwc3_msm_pm_resume)
4166 SET_RUNTIME_PM_OPS(dwc3_msm_runtime_suspend, dwc3_msm_runtime_resume,
4167 dwc3_msm_runtime_idle)
4168};
4169
4170static const struct of_device_id of_dwc3_matach[] = {
4171 {
4172 .compatible = "qcom,dwc-usb3-msm",
4173 },
4174 { },
4175};
4176MODULE_DEVICE_TABLE(of, of_dwc3_match);
4177
4178static struct platform_driver dwc3_msm_driver = {
4179 .probe = dwc3_msm_probe,
4180 .remove = dwc3_msm_remove,
4181 .driver = {
4182 .name = "msm-dwc3",
4183 .pm = &dwc3_msm_dev_pm_ops,
4184 .of_match_table = of_dwc3_match,
4185 },
4186};
4187
4188MODULE_LICENSE("GPL v2");
4189MODULE_DESCRIPTION("DesignWare USB3 MSM Glue Layer");
4190
4191static int dwc3_msm_init(void)
4192{
4193 return platform_driver_register(&dwc3_msm_driver);
4194}
4195module_init(dwc3_msm_init);
4196
4197static void __exit dwc3_msm_exit(void)
4198{
4199 platform_driver_unregister(&dwc3_msm_driver);
4200}
4201module_exit(dwc3_msm_exit);