/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/pm_runtime.h>
#include <linux/ratelimit.h>
#include <linux/interrupt.h>
#include <asm/dma-iommu.h>
#include <linux/iommu.h>
#include <linux/ioport.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/of.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_wakeup.h>
#include <linux/power_supply.h>
#include <linux/cdev.h>
#include <linux/completion.h>
#include <linux/msm-bus.h>
#include <linux/irq.h>
#include <linux/extcon.h>
#include <linux/reset.h>
#include <linux/clk/qcom.h>

#include "power.h"
#include "core.h"
#include "gadget.h"
#include "dbm.h"
#include "debug.h"
#include "xhci.h"

/* Timeout to wait for USB cable status notification (in ms) */
#define SM_INIT_TIMEOUT 30000

/* AHB2PHY register offsets */
#define PERIPH_SS_AHB2PHY_TOP_CFG 0x10

/* AHB2PHY read/write wait value */
#define ONE_READ_WRITE_WAIT 0x11

/* CPU to which the USB interrupt is affined */
static int cpu_to_affin;
module_param(cpu_to_affin, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(cpu_to_affin, "affin usb irq to this cpu");

/* XHCI registers */
#define USB3_HCSPARAMS1		(0x4)
#define USB3_PORTSC		(0x420)

/* USB QSCRATCH Hardware registers */
#define QSCRATCH_REG_OFFSET	(0x000F8800)
#define QSCRATCH_GENERAL_CFG	(QSCRATCH_REG_OFFSET + 0x08)
#define CGCTL_REG		(QSCRATCH_REG_OFFSET + 0x28)
#define PWR_EVNT_IRQ_STAT_REG	(QSCRATCH_REG_OFFSET + 0x58)
#define PWR_EVNT_IRQ_MASK_REG	(QSCRATCH_REG_OFFSET + 0x5C)

#define PWR_EVNT_POWERDOWN_IN_P3_MASK		BIT(2)
#define PWR_EVNT_POWERDOWN_OUT_P3_MASK		BIT(3)
#define PWR_EVNT_LPM_IN_L2_MASK			BIT(4)
#define PWR_EVNT_LPM_OUT_L2_MASK		BIT(5)
#define PWR_EVNT_LPM_OUT_L1_MASK		BIT(13)

/* QSCRATCH_GENERAL_CFG register bit offset */
#define PIPE_UTMI_CLK_SEL	BIT(0)
#define PIPE3_PHYSTATUS_SW	BIT(3)
#define PIPE_UTMI_CLK_DIS	BIT(8)

#define HS_PHY_CTRL_REG		(QSCRATCH_REG_OFFSET + 0x10)
#define UTMI_OTG_VBUS_VALID	BIT(20)
#define SW_SESSVLD_SEL		BIT(28)

#define SS_PHY_CTRL_REG		(QSCRATCH_REG_OFFSET + 0x30)
#define LANE0_PWR_PRESENT	BIT(24)

/* GSI related registers */
#define GSI_TRB_ADDR_BIT_53_MASK	(1 << 21)
#define GSI_TRB_ADDR_BIT_55_MASK	(1 << 23)

#define GSI_GENERAL_CFG_REG		(QSCRATCH_REG_OFFSET + 0xFC)
#define GSI_RESTART_DBL_PNTR_MASK	BIT(20)
#define GSI_CLK_EN_MASK			BIT(12)
#define BLOCK_GSI_WR_GO_MASK		BIT(1)
#define GSI_EN_MASK			BIT(0)

#define GSI_DBL_ADDR_L(n)	((QSCRATCH_REG_OFFSET + 0x110) + (n*4))
#define GSI_DBL_ADDR_H(n)	((QSCRATCH_REG_OFFSET + 0x120) + (n*4))
#define GSI_RING_BASE_ADDR_L(n)	((QSCRATCH_REG_OFFSET + 0x130) + (n*4))
#define GSI_RING_BASE_ADDR_H(n)	((QSCRATCH_REG_OFFSET + 0x140) + (n*4))

#define GSI_IF_STS		(QSCRATCH_REG_OFFSET + 0x1A4)
#define GSI_WR_CTRL_STATE_MASK	BIT(15)

#define DWC3_GEVNTCOUNT_EVNTINTRPTMASK		(1 << 31)
#define DWC3_GEVNTADRHI_EVNTADRHI_GSI_EN(n)	(n << 22)
#define DWC3_GEVNTADRHI_EVNTADRHI_GSI_IDX(n)	(n << 16)
#define DWC3_GEVENT_TYPE_GSI			0x3

struct dwc3_msm_req_complete {
	struct list_head list_item;
	struct usb_request *req;
	void (*orig_complete)(struct usb_ep *ep,
			      struct usb_request *req);
};

enum dwc3_id_state {
	DWC3_ID_GROUND = 0,
	DWC3_ID_FLOAT,
};

/* for type c cable */
enum plug_orientation {
	ORIENTATION_NONE,
	ORIENTATION_CC1,
	ORIENTATION_CC2,
};

/* Input bits to state machine (mdwc->inputs) */

#define ID			0
#define B_SESS_VLD		1
#define B_SUSPEND		2

#define PM_QOS_SAMPLE_SEC	2
#define PM_QOS_THRESHOLD	400

struct dwc3_msm {
	struct device *dev;
	void __iomem *base;
	void __iomem *ahb2phy_base;
	struct platform_device	*dwc3;
	struct dma_iommu_mapping *iommu_map;
	const struct usb_ep_ops *original_ep_ops[DWC3_ENDPOINTS_NUM];
	struct list_head req_complete_list;
	struct clk		*xo_clk;
	struct clk		*core_clk;
	long			core_clk_rate;
	long			core_clk_rate_hs;
	struct clk		*iface_clk;
	struct clk		*sleep_clk;
	struct clk		*utmi_clk;
	unsigned int		utmi_clk_rate;
	struct clk		*utmi_clk_src;
	struct clk		*bus_aggr_clk;
	struct clk		*noc_aggr_clk;
	struct clk		*cfg_ahb_clk;
	struct reset_control	*core_reset;
	struct regulator	*dwc3_gdsc;

	struct usb_phy		*hs_phy, *ss_phy;

	struct dbm		*dbm;

	/* VBUS regulator for host mode */
	struct regulator	*vbus_reg;
	int			vbus_retry_count;
	bool			resume_pending;
	atomic_t		pm_suspended;
	int			hs_phy_irq;
	int			ss_phy_irq;
	struct work_struct	resume_work;
	struct work_struct	restart_usb_work;
	bool			in_restart;
	struct workqueue_struct *dwc3_wq;
	struct delayed_work	sm_work;
	unsigned long		inputs;
	unsigned int		max_power;
	bool			charging_disabled;
	enum usb_otg_state	otg_state;
	struct work_struct	bus_vote_w;
	unsigned int		bus_vote;
	u32			bus_perf_client;
	struct msm_bus_scale_pdata	*bus_scale_table;
	struct power_supply	*usb_psy;
	struct work_struct	vbus_draw_work;
	bool			in_host_mode;
	enum usb_device_speed	max_rh_port_speed;
	unsigned int		tx_fifo_size;
	bool			vbus_active;
	bool			suspend;
	bool			disable_host_mode_pm;
	enum dwc3_id_state	id_state;
	unsigned long		lpm_flags;
#define MDWC3_SS_PHY_SUSPEND		BIT(0)
#define MDWC3_ASYNC_IRQ_WAKE_CAPABILITY	BIT(1)
#define MDWC3_POWER_COLLAPSE		BIT(2)

	unsigned int		irq_to_affin;
	struct notifier_block	dwc3_cpu_notifier;

	struct extcon_dev	*extcon_vbus;
	struct extcon_dev	*extcon_id;
	struct extcon_dev	*extcon_eud;
	struct notifier_block	vbus_nb;
	struct notifier_block	id_nb;
	struct notifier_block	eud_event_nb;

	struct notifier_block	host_nb;

	int			pwr_event_irq;
	atomic_t		in_p3;
	unsigned int		lpm_to_suspend_delay;
	bool			init;
	enum plug_orientation	typec_orientation;
	u32			num_gsi_event_buffers;
	struct dwc3_event_buffer **gsi_ev_buff;
	int			pm_qos_latency;
	struct pm_qos_request	pm_qos_req_dma;
	struct delayed_work	perf_vote_work;
};

#define USB_HSPHY_3P3_VOL_MIN		3050000 /* uV */
#define USB_HSPHY_3P3_VOL_MAX		3300000 /* uV */
#define USB_HSPHY_3P3_HPM_LOAD		16000	/* uA */

#define USB_HSPHY_1P8_VOL_MIN		1800000 /* uV */
#define USB_HSPHY_1P8_VOL_MAX		1800000 /* uV */
#define USB_HSPHY_1P8_HPM_LOAD		19000	/* uA */

#define USB_SSPHY_1P8_VOL_MIN		1800000 /* uV */
#define USB_SSPHY_1P8_VOL_MAX		1800000 /* uV */
#define USB_SSPHY_1P8_HPM_LOAD		23000	/* uA */

#define DSTS_CONNECTSPD_SS		0x4


static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc);
static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA);
static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event);
/**
 * Read register with debug info.
 *
 * @base - DWC3 base virtual address.
 * @offset - register offset.
 *
 * @return u32
 */
static inline u32 dwc3_msm_read_reg(void *base, u32 offset)
{
	u32 val = ioread32(base + offset);
	return val;
}

/**
 * Read register masked field with debug info.
 *
 * @base - DWC3 base virtual address.
 * @offset - register offset.
 * @mask - register bitmask.
 *
 * @return u32
 */
static inline u32 dwc3_msm_read_reg_field(void *base,
					  u32 offset,
					  const u32 mask)
{
	u32 shift = find_first_bit((void *)&mask, 32);
	u32 val = ioread32(base + offset);

	val &= mask;		/* clear other bits */
	val >>= shift;
	return val;
}

/**
 * Write register with debug info.
 *
 * @base - DWC3 base virtual address.
 * @offset - register offset.
 * @val - value to write.
 */
static inline void dwc3_msm_write_reg(void *base, u32 offset, u32 val)
{
	iowrite32(val, base + offset);
}

/**
 * Write register masked field with debug info.
 *
 * @base - DWC3 base virtual address.
 * @offset - register offset.
 * @mask - register bitmask.
 * @val - value to write.
 */
static inline void dwc3_msm_write_reg_field(void *base, u32 offset,
					    const u32 mask, u32 val)
{
	u32 shift = find_first_bit((void *)&mask, 32);
	u32 tmp = ioread32(base + offset);

	tmp &= ~mask;		/* clear written bits */
	val = tmp | (val << shift);
	iowrite32(val, base + offset);
}

/**
 * Write register and read back masked value to confirm it is written
 *
 * @base - DWC3 base virtual address.
 * @offset - register offset.
 * @mask - register bitmask specifying what should be updated
 * @val - value to write.
 */
static inline void dwc3_msm_write_readback(void *base, u32 offset,
					   const u32 mask, u32 val)
{
	u32 write_val, tmp = ioread32(base + offset);

	tmp &= ~mask;		/* retain other bits */
	write_val = tmp | val;

	iowrite32(write_val, base + offset);

	/* Read back to see if val was written */
	tmp = ioread32(base + offset);
	tmp &= mask;		/* clear other bits */

	if (tmp != val)
		pr_err("%s: write: %x to QSCRATCH: %x FAILED\n",
			__func__, val, offset);
}

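/* Return true if any xHCI root hub port reports a connected SuperSpeed device */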
static bool dwc3_msm_is_ss_rhport_connected(struct dwc3_msm *mdwc)
{
	int i, num_ports;
	u32 reg;

	reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
	num_ports = HCS_MAX_PORTS(reg);

	for (i = 0; i < num_ports; i++) {
		reg = dwc3_msm_read_reg(mdwc->base, USB3_PORTSC + i*0x10);
		if ((reg & PORT_CONNECT) && DEV_SUPERSPEED(reg))
			return true;
	}

	return false;
}

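/* Return true if any enabled root hub port is running at SuperSpeed */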
static bool dwc3_msm_is_host_superspeed(struct dwc3_msm *mdwc)
{
	int i, num_ports;
	u32 reg;

	reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
	num_ports = HCS_MAX_PORTS(reg);

	for (i = 0; i < num_ports; i++) {
		reg = dwc3_msm_read_reg(mdwc->base, USB3_PORTSC + i*0x10);
		if ((reg & PORT_PE) && DEV_SUPERSPEED(reg))
			return true;
	}

	return false;
}

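/* Return true if the device-mode connection speed reported in DSTS is SuperSpeed */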
static inline bool dwc3_msm_is_dev_superspeed(struct dwc3_msm *mdwc)
{
	u8 speed;

	speed = dwc3_msm_read_reg(mdwc->base, DWC3_DSTS) & DWC3_DSTS_CONNECTSPD;
	return !!(speed & DSTS_CONNECTSPD_SS);
}

static inline bool dwc3_msm_is_superspeed(struct dwc3_msm *mdwc)
{
	if (mdwc->in_host_mode)
		return dwc3_msm_is_host_superspeed(mdwc);

	return dwc3_msm_is_dev_superspeed(mdwc);
}

#if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
/**
 * Configure the DBM with the BAM's data fifo.
 * This function is called by the USB BAM Driver
 * upon initialization.
 *
 * @ep - pointer to usb endpoint.
 * @addr - address of data fifo.
 * @size - size of data fifo.
 */
int msm_data_fifo_config(struct usb_ep *ep, phys_addr_t addr,
			 u32 size, u8 dst_pipe_idx)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);

	dev_dbg(mdwc->dev, "%s\n", __func__);

	return dbm_data_fifo_config(mdwc->dbm, dep->number, addr, size,
				    dst_pipe_idx);
}


/**
 * Clean up MSM-specific endpoint state when a request completes.
 *
 * Also calls the original request completion callback.
 *
 * @usb_ep - pointer to usb_ep instance.
 * @request - pointer to usb_request instance.
 */
static void dwc3_msm_req_complete_func(struct usb_ep *ep,
				       struct usb_request *request)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
	struct dwc3_msm_req_complete *req_complete = NULL;

	/* Find original request complete function and remove it from list */
	list_for_each_entry(req_complete, &mdwc->req_complete_list, list_item) {
		if (req_complete->req == request)
			break;
	}
	if (!req_complete || req_complete->req != request) {
		dev_err(dep->dwc->dev, "%s: could not find the request\n",
			__func__);
		return;
	}
	list_del(&req_complete->list_item);

	/*
	 * Release one more TRB to the pool since the DBM queue took 2 TRBs
	 * (normal and link), and dwc3/gadget.c :: dwc3_gadget_giveback
	 * released only one.
	 */
	dep->trb_dequeue++;

	/* Unconfigure dbm ep */
	dbm_ep_unconfig(mdwc->dbm, dep->number);

	/*
	 * If this is the last endpoint we unconfigured, then also reset
	 * the event buffers; unless the ep is being unconfigured due to lpm,
	 * in which case the event buffer only gets reset during the
	 * block reset.
	 */
	if (dbm_get_num_of_eps_configured(mdwc->dbm) == 0 &&
		!dbm_reset_ep_after_lpm(mdwc->dbm))
		dbm_event_buffer_config(mdwc->dbm, 0, 0, 0);

	/*
	 * Call the original complete function; note that dwc->lock is already
	 * held by the caller of this function (dwc3_gadget_giveback()).
	 */
	request->complete = req_complete->orig_complete;
	if (request->complete)
		request->complete(ep, request);

	kfree(req_complete);
}


/**
 * Helper function to reset a DBM endpoint.
 *
 * @mdwc - pointer to dwc3_msm instance.
 * @dep - pointer to dwc3_ep instance.
 *
 * @return int - 0 on success, negative on error.
 */
static int __dwc3_msm_dbm_ep_reset(struct dwc3_msm *mdwc, struct dwc3_ep *dep)
{
	int ret;

	dev_dbg(mdwc->dev, "Resetting dbm endpoint %d\n", dep->number);

	/* Reset the dbm endpoint */
	ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, true);
	if (ret) {
		dev_err(mdwc->dev, "%s: failed to assert dbm ep reset\n",
			__func__);
		return ret;
	}

	/*
	 * The necessary delay between asserting and deasserting the dbm ep
	 * reset is based on the number of active endpoints. If there is more
	 * than one endpoint, a 1 msec delay is required. Otherwise, a shorter
	 * delay will suffice.
	 */
	if (dbm_get_num_of_eps_configured(mdwc->dbm) > 1)
		usleep_range(1000, 1200);
	else
		udelay(10);
	ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, false);
	if (ret) {
		dev_err(mdwc->dev, "%s: failed to deassert dbm ep reset\n",
			__func__);
		return ret;
	}

	return 0;
}

/**
 * Reset the DBM endpoint which is linked to the given USB endpoint.
 *
 * @usb_ep - pointer to usb_ep instance.
 *
 * @return int - 0 on success, negative on error.
 */

int msm_dwc3_reset_dbm_ep(struct usb_ep *ep)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);

	return __dwc3_msm_dbm_ep_reset(mdwc, dep);
}
EXPORT_SYMBOL(msm_dwc3_reset_dbm_ep);


/**
 * Helper function.
 * See the header of the dwc3_msm_ep_queue function.
 *
 * @dwc3_ep - pointer to dwc3_ep instance.
 * @req - pointer to dwc3_request instance.
 *
 * @return int - 0 on success, negative on error.
 */
static int __dwc3_msm_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
{
	struct dwc3_trb *trb;
	struct dwc3_trb *trb_link;
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd;
	int ret = 0;

	/* We push the request to the dep->started_list list to indicate that
	 * this request is issued with start transfer. The request will be
	 * removed from this list in two cases. The first is that the transfer
	 * completes (not if the transfer is endless using circular TRBs with
	 * a link TRB). The second case is a stop transfer, which can be
	 * initiated by the function driver when calling dequeue.
	 */
	req->started = true;
	list_add_tail(&req->list, &dep->started_list);

	/* First, prepare a normal TRB, point to the fake buffer */
	trb = &dep->trb_pool[dep->trb_enqueue & DWC3_TRB_NUM];
	dep->trb_enqueue++;
	memset(trb, 0, sizeof(*trb));

	req->trb = trb;
	trb->bph = DBM_TRB_BIT | DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
	trb->size = DWC3_TRB_SIZE_LENGTH(req->request.length);
	trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_HWO |
		DWC3_TRB_CTRL_CHN | (req->direction ? 0 : DWC3_TRB_CTRL_CSP);
	req->trb_dma = dwc3_trb_dma_offset(dep, trb);

	/* Second, prepare a Link TRB that points to the first TRB */
	trb_link = &dep->trb_pool[dep->trb_enqueue & DWC3_TRB_NUM];
	dep->trb_enqueue++;
	memset(trb_link, 0, sizeof(*trb_link));

	trb_link->bpl = lower_32_bits(req->trb_dma);
	trb_link->bph = DBM_TRB_BIT |
			DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
	trb_link->size = 0;
	trb_link->ctrl = DWC3_TRBCTL_LINK_TRB | DWC3_TRB_CTRL_HWO;

	/*
	 * Now start the transfer
	 */
	memset(&params, 0, sizeof(params));
	params.param0 = 0; /* TDAddr High */
	params.param1 = lower_32_bits(req->trb_dma); /* DAddr Low */

	/* DBM requires IOC to be set */
	cmd = DWC3_DEPCMD_STARTTRANSFER | DWC3_DEPCMD_CMDIOC;
	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
	if (ret < 0) {
		dev_dbg(dep->dwc->dev,
			"%s: failed to send STARTTRANSFER command\n",
			__func__);

		list_del(&req->list);
		return ret;
	}
	dep->flags |= DWC3_EP_BUSY;
	dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);

	return ret;
}

/**
 * Queue a usb request to the DBM endpoint.
 * This function should be called after the endpoint
 * was enabled by ep_enable.
 *
 * This function prepares a special structure of TRBs which
 * is compatible with the DBM HW, so it will be possible to use
 * this endpoint in DBM mode.
 *
 * The TRBs prepared by this function are one normal TRB
 * which points to a fake buffer, followed by a link TRB
 * that points to the first TRB.
 *
 * The API of this function follows the regular API of
 * usb_ep_queue (see usb_ep_ops in include/linux/usb/gadget.h).
 *
 * @usb_ep - pointer to usb_ep instance.
 * @request - pointer to usb_request instance.
 * @gfp_flags - possible flags.
 *
 * @return int - 0 on success, negative on error.
 */
static int dwc3_msm_ep_queue(struct usb_ep *ep,
			     struct usb_request *request, gfp_t gfp_flags)
{
	struct dwc3_request *req = to_dwc3_request(request);
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
	struct dwc3_msm_req_complete *req_complete;
	unsigned long flags;
	int ret = 0, size;
	u8 bam_pipe;
	bool producer;
	bool disable_wb;
	bool internal_mem;
	bool ioc;
	bool superspeed;

	if (!(request->udc_priv & MSM_SPS_MODE)) {
		/* Not SPS mode, call original queue */
		dev_vdbg(mdwc->dev, "%s: not sps mode, use regular queue\n",
			__func__);

		return (mdwc->original_ep_ops[dep->number])->queue(ep,
								request,
								gfp_flags);
	}

	/* HW restriction regarding TRB size (8KB) */
	if (req->request.length < 0x2000) {
		dev_err(mdwc->dev, "%s: Min TRB size is 8KB\n", __func__);
		return -EINVAL;
	}

	/*
	 * Override the req->complete function, but before doing that,
	 * store its original pointer in the req_complete_list.
	 */
	req_complete = kzalloc(sizeof(*req_complete), gfp_flags);
	if (!req_complete)
		return -ENOMEM;

	req_complete->req = request;
	req_complete->orig_complete = request->complete;
	list_add_tail(&req_complete->list_item, &mdwc->req_complete_list);
	request->complete = dwc3_msm_req_complete_func;

	/*
	 * Configure the DBM endpoint
	 */
	bam_pipe = request->udc_priv & MSM_PIPE_ID_MASK;
	producer = ((request->udc_priv & MSM_PRODUCER) ? true : false);
	disable_wb = ((request->udc_priv & MSM_DISABLE_WB) ? true : false);
	internal_mem = ((request->udc_priv & MSM_INTERNAL_MEM) ? true : false);
	ioc = ((request->udc_priv & MSM_ETD_IOC) ? true : false);

	ret = dbm_ep_config(mdwc->dbm, dep->number, bam_pipe, producer,
			    disable_wb, internal_mem, ioc);
	if (ret < 0) {
		dev_err(mdwc->dev,
			"error %d after calling dbm_ep_config\n", ret);
		return ret;
	}

	dev_vdbg(dwc->dev, "%s: queuing request %p to ep %s length %d\n",
		__func__, request, ep->name, request->length);
	size = dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTSIZ(0));
	dbm_event_buffer_config(mdwc->dbm,
		dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRLO(0)),
		dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRHI(0)),
		DWC3_GEVNTSIZ_SIZE(size));

	/*
	 * We must take the dwc3 core driver's lock (with interrupts
	 * disabled) so we are the only ones configuring the HW core,
	 * and queue the request as quickly as possible so the lock is
	 * released soon.
	 */
	spin_lock_irqsave(&dwc->lock, flags);
	if (!dep->endpoint.desc) {
		dev_err(mdwc->dev,
			"%s: trying to queue request %p to disabled ep %s\n",
			__func__, request, ep->name);
		ret = -EPERM;
		goto err;
	}

	if (dep->number == 0 || dep->number == 1) {
		dev_err(mdwc->dev,
			"%s: trying to queue dbm request %p to control ep %s\n",
			__func__, request, ep->name);
		ret = -EPERM;
		goto err;
	}


	if (dep->trb_dequeue != dep->trb_enqueue ||
			!list_empty(&dep->pending_list)
			|| !list_empty(&dep->started_list)) {
		dev_err(mdwc->dev,
			"%s: trying to queue dbm request %p to ep %s\n",
			__func__, request, ep->name);
		ret = -EPERM;
		goto err;
	} else {
		dep->trb_dequeue = 0;
		dep->trb_enqueue = 0;
	}

	ret = __dwc3_msm_ep_queue(dep, req);
	if (ret < 0) {
		dev_err(mdwc->dev,
			"error %d after calling __dwc3_msm_ep_queue\n", ret);
		goto err;
	}

	spin_unlock_irqrestore(&dwc->lock, flags);
	superspeed = dwc3_msm_is_dev_superspeed(mdwc);
	dbm_set_speed(mdwc->dbm, (u8)superspeed);

	return 0;

err:
	spin_unlock_irqrestore(&dwc->lock, flags);
	kfree(req_complete);
	return ret;
}

/*
 * Returns XferRscIndex for the EP. This is stored at StartXfer GSI EP OP.
 *
 * @usb_ep - pointer to usb_ep instance.
 *
 * @return int - XferRscIndex
 */
static inline int gsi_get_xfer_index(struct usb_ep *ep)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);

	return dep->resource_index;
}

/*
 * Fills up the GSI channel information needed in call to IPA driver
 * for GSI channel creation.
 *
 * @usb_ep - pointer to usb_ep instance.
 * @ch_info - output parameter with requested channel info
 */
static void gsi_get_channel_info(struct usb_ep *ep,
			struct gsi_channel_info *ch_info)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	int last_trb_index = 0;
	struct dwc3 *dwc = dep->dwc;
	struct usb_gsi_request *request = ch_info->ch_req;

	/* Provide physical USB addresses for DEPCMD and GEVENTCNT registers */
	ch_info->depcmd_low_addr = (u32)(dwc->reg_phys +
						DWC3_DEPCMD);
	ch_info->depcmd_hi_addr = 0;

	ch_info->xfer_ring_base_addr = dwc3_trb_dma_offset(dep,
						&dep->trb_pool[0]);
	/* Convert to multiples of 1KB */
	ch_info->const_buffer_size = request->buf_len/1024;

	/* IN direction */
	if (dep->direction) {
		/*
		 * Multiply by size of each TRB for xfer_ring_len in bytes.
		 * 2n + 2 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
		 * extra Xfer TRB followed by n ZLP TRBs + 1 LINK TRB.
		 */
		ch_info->xfer_ring_len = (2 * request->num_bufs + 2) * 0x10;
		last_trb_index = 2 * request->num_bufs + 2;
	} else { /* OUT direction */
		/*
		 * Multiply by size of each TRB for xfer_ring_len in bytes.
		 * n + 1 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
		 * LINK TRB.
		 */
		ch_info->xfer_ring_len = (request->num_bufs + 1) * 0x10;
		last_trb_index = request->num_bufs + 1;
	}

	/* Store last 16 bits of LINK TRB address as per GSI hw requirement */
	ch_info->last_trb_addr = (dwc3_trb_dma_offset(dep,
			&dep->trb_pool[last_trb_index - 1]) & 0x0000FFFF);
	ch_info->gevntcount_low_addr = (u32)(dwc->reg_phys +
			DWC3_GEVNTCOUNT(ep->ep_intr_num));
	ch_info->gevntcount_hi_addr = 0;

	dev_dbg(dwc->dev,
	"depcmd_laddr=%x last_trb_addr=%x gevtcnt_laddr=%x gevtcnt_haddr=%x",
		ch_info->depcmd_low_addr, ch_info->last_trb_addr,
		ch_info->gevntcount_low_addr, ch_info->gevntcount_hi_addr);
}

/*
 * Perform StartXfer on GSI EP. Stores XferRscIndex.
 *
 * @usb_ep - pointer to usb_ep instance.
 *
 * @return int - 0 on success
 */
static int gsi_startxfer_for_ep(struct usb_ep *ep)
{
	int ret;
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd;
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	memset(&params, 0, sizeof(params));
	params.param0 = GSI_TRB_ADDR_BIT_53_MASK | GSI_TRB_ADDR_BIT_55_MASK;
	params.param0 |= (ep->ep_intr_num << 16);
	params.param1 = lower_32_bits(dwc3_trb_dma_offset(dep,
						&dep->trb_pool[0]));
	cmd = DWC3_DEPCMD_STARTTRANSFER;
	cmd |= DWC3_DEPCMD_PARAM(0);
	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);

	if (ret < 0)
		dev_dbg(dwc->dev, "Fail StrtXfr on GSI EP#%d\n", dep->number);
	dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
	dev_dbg(dwc->dev, "XferRsc = %x", dep->resource_index);
	return ret;
}

/*
 * Store Ring Base and Doorbell Address for GSI EP
 * for GSI channel creation.
 *
 * @usb_ep - pointer to usb_ep instance.
 * @dbl_addr - Doorbell address obtained from IPA driver
 */
static void gsi_store_ringbase_dbl_info(struct usb_ep *ep, u32 dbl_addr)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
	int n = ep->ep_intr_num - 1;

	dwc3_msm_write_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n),
			dwc3_trb_dma_offset(dep, &dep->trb_pool[0]));
	dwc3_msm_write_reg(mdwc->base, GSI_DBL_ADDR_L(n), dbl_addr);

	dev_dbg(mdwc->dev, "Ring Base Addr %d = %x", n,
			dwc3_msm_read_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n)));
	dev_dbg(mdwc->dev, "GSI DB Addr %d = %x", n,
			dwc3_msm_read_reg(mdwc->base, GSI_DBL_ADDR_L(n)));
}

/*
 * Rings Doorbell for IN GSI Channel
 *
 * @usb_ep - pointer to usb_ep instance.
 * @request - pointer to GSI request. This is used to pass in the
 * address of the GSI doorbell obtained from IPA driver
 */
static void gsi_ring_in_db(struct usb_ep *ep, struct usb_gsi_request *request)
{
	void __iomem *gsi_dbl_address_lsb;
	void __iomem *gsi_dbl_address_msb;
	dma_addr_t offset;
	u64 dbl_addr = *((u64 *)request->buf_base_addr);
	u32 dbl_lo_addr = (dbl_addr & 0xFFFFFFFF);
	u32 dbl_hi_addr = (dbl_addr >> 32);
	u32 num_trbs = (request->num_bufs * 2 + 2);
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);

	gsi_dbl_address_lsb = devm_ioremap_nocache(mdwc->dev,
					dbl_lo_addr, sizeof(u32));
	if (!gsi_dbl_address_lsb)
		dev_dbg(mdwc->dev, "Failed to get GSI DBL address LSB\n");

	gsi_dbl_address_msb = devm_ioremap_nocache(mdwc->dev,
					dbl_hi_addr, sizeof(u32));
	if (!gsi_dbl_address_msb)
		dev_dbg(mdwc->dev, "Failed to get GSI DBL address MSB\n");

	offset = dwc3_trb_dma_offset(dep, &dep->trb_pool[num_trbs-1]);
	dev_dbg(mdwc->dev, "Writing link TRB addr: %pa to %p (%x)\n",
		&offset, gsi_dbl_address_lsb, dbl_lo_addr);

	writel_relaxed(offset, gsi_dbl_address_lsb);
	writel_relaxed(0, gsi_dbl_address_msb);
}

/*
 * Sets HWO bit for TRBs and performs UpdateXfer for OUT EP.
 *
 * @usb_ep - pointer to usb_ep instance.
 * @request - pointer to GSI request. Used to determine num of TRBs for OUT EP.
 *
 * @return int - 0 on success
 */
static int gsi_updatexfer_for_ep(struct usb_ep *ep,
					struct usb_gsi_request *request)
{
	int i;
	int ret;
	u32 cmd;
	int num_trbs = request->num_bufs + 1;
	struct dwc3_trb *trb;
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	for (i = 0; i < num_trbs - 1; i++) {
		trb = &dep->trb_pool[i];
		trb->ctrl |= DWC3_TRB_CTRL_HWO;
	}

	memset(&params, 0, sizeof(params));
	cmd = DWC3_DEPCMD_UPDATETRANSFER;
	cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
	dep->flags |= DWC3_EP_BUSY;
	if (ret < 0)
		dev_dbg(dwc->dev, "UpdateXfr fail on GSI EP#%d\n", dep->number);
	return ret;
}

/*
 * Perform EndXfer on particular GSI EP.
 *
 * @usb_ep - pointer to usb_ep instance.
 */
static void gsi_endxfer_for_ep(struct usb_ep *ep)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	dwc3_stop_active_transfer(dwc, dep->number, true);
}

/*
 * Allocates and configures TRBs for GSI EPs.
 *
 * @usb_ep - pointer to usb_ep instance.
 * @request - pointer to GSI request.
 *
 * @return int - 0 on success
 */
static int gsi_prepare_trbs(struct usb_ep *ep, struct usb_gsi_request *req)
{
	int i = 0;
	dma_addr_t buffer_addr = req->dma;
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_trb *trb;
	int num_trbs = (dep->direction) ? (2 * (req->num_bufs) + 2)
					: (req->num_bufs + 1);

	dep->trb_dma_pool = dma_pool_create(ep->name, dwc->sysdev,
					num_trbs * sizeof(struct dwc3_trb),
					num_trbs * sizeof(struct dwc3_trb), 0);
	if (!dep->trb_dma_pool) {
		dev_err(dep->dwc->dev, "failed to alloc trb dma pool for %s\n",
				dep->name);
		return -ENOMEM;
	}

	dep->num_trbs = num_trbs;

	dep->trb_pool = dma_pool_alloc(dep->trb_dma_pool,
					   GFP_KERNEL, &dep->trb_pool_dma);
	if (!dep->trb_pool) {
		dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
				dep->name);
		return -ENOMEM;
	}

	/* IN direction */
	if (dep->direction) {
		for (i = 0; i < num_trbs ; i++) {
			trb = &dep->trb_pool[i];
			memset(trb, 0, sizeof(*trb));
			/* Set up first n+1 TRBs for ZLPs */
			if (i < (req->num_bufs + 1)) {
				trb->bpl = 0;
				trb->bph = 0;
				trb->size = 0;
				trb->ctrl = DWC3_TRBCTL_NORMAL
						| DWC3_TRB_CTRL_IOC;
				continue;
			}

			/* Setup n TRBs pointing to valid buffers */
			trb->bpl = lower_32_bits(buffer_addr);
			trb->bph = 0;
			trb->size = 0;
			trb->ctrl = DWC3_TRBCTL_NORMAL
					| DWC3_TRB_CTRL_IOC;
			buffer_addr += req->buf_len;

			/* Set up the Link TRB at the end */
			if (i == (num_trbs - 1)) {
				trb->bpl = dwc3_trb_dma_offset(dep,
							&dep->trb_pool[0]);
				trb->bph = (1 << 23) | (1 << 21)
						| (ep->ep_intr_num << 16);
				trb->size = 0;
				trb->ctrl = DWC3_TRBCTL_LINK_TRB
						| DWC3_TRB_CTRL_HWO;
			}
		}
	} else { /* OUT direction */

		for (i = 0; i < num_trbs ; i++) {

			trb = &dep->trb_pool[i];
			memset(trb, 0, sizeof(*trb));
			trb->bpl = lower_32_bits(buffer_addr);
			trb->bph = 0;
			trb->size = req->buf_len;
			trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_IOC
					| DWC3_TRB_CTRL_CSP
					| DWC3_TRB_CTRL_ISP_IMI;
			buffer_addr += req->buf_len;

			/* Set up the Link TRB at the end */
			if (i == (num_trbs - 1)) {
				trb->bpl = dwc3_trb_dma_offset(dep,
							&dep->trb_pool[0]);
				trb->bph = (1 << 23) | (1 << 21)
						| (ep->ep_intr_num << 16);
				trb->size = 0;
				trb->ctrl = DWC3_TRBCTL_LINK_TRB
						| DWC3_TRB_CTRL_HWO;
			}
		}
	}
	return 0;
}

/*
 * Frees TRBs for GSI EPs.
 *
 * @usb_ep - pointer to usb_ep instance.
 */
static void gsi_free_trbs(struct usb_ep *ep)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);

	if (dep->endpoint.ep_type == EP_TYPE_NORMAL)
		return;

	/* Free TRBs and TRB pool for EP */
	if (dep->trb_dma_pool) {
		dma_pool_free(dep->trb_dma_pool, dep->trb_pool,
						dep->trb_pool_dma);
		dma_pool_destroy(dep->trb_dma_pool);
		dep->trb_pool = NULL;
		dep->trb_pool_dma = 0;
		dep->trb_dma_pool = NULL;
	}
}

/*
 * Configures GSI EPs. For GSI EPs we need to set interrupter numbers.
 *
 * @usb_ep - pointer to usb_ep instance.
 * @request - pointer to GSI request.
 */
static void gsi_configure_ep(struct usb_ep *ep, struct usb_gsi_request *request)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
	struct dwc3_gadget_ep_cmd_params params;
	const struct usb_endpoint_descriptor *desc = ep->desc;
	const struct usb_ss_ep_comp_descriptor *comp_desc = ep->comp_desc;
	u32 reg;

	memset(&params, 0x00, sizeof(params));

	/* Configure GSI EP */
	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));

	/* Burst size is only needed in SuperSpeed mode */
	if (dwc->gadget.speed == USB_SPEED_SUPER) {
		u32 burst = dep->endpoint.maxburst - 1;

		params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
	}

	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
					| DWC3_DEPCFG_STREAM_EVENT_EN;
		dep->stream_capable = true;
	}

	/* Set EP number */
	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);

	/* Set interrupter number for GSI endpoints */
	params.param1 |= DWC3_DEPCFG_INT_NUM(ep->ep_intr_num);

	/* Enable XferInProgress and XferComplete Interrupts */
	params.param1 |= DWC3_DEPCFG_XFER_COMPLETE_EN;
	params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
	params.param1 |= DWC3_DEPCFG_FIFO_ERROR_EN;
	/*
	 * We must use the lower 16 TX FIFOs even though
	 * HW might have more
	 */
	/* Remove FIFO Number for GSI EP */
	if (dep->direction)
		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);

	params.param0 |= DWC3_DEPCFG_ACTION_INIT;

	dev_dbg(mdwc->dev, "Set EP config to params = %x %x %x, for %s\n",
		params.param0, params.param1, params.param2, dep->name);

	dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);

	/* Set XferRsc Index for GSI EP */
	if (!(dep->flags & DWC3_EP_ENABLED)) {
		memset(&params, 0x00, sizeof(params));
		params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
		dwc3_send_gadget_ep_cmd(dep,
			DWC3_DEPCMD_SETTRANSFRESOURCE, &params);

		dep->endpoint.desc = desc;
		dep->comp_desc = comp_desc;
		dep->type = usb_endpoint_type(desc);
		dep->flags |= DWC3_EP_ENABLED;
		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
		reg |= DWC3_DALEPENA_EP(dep->number);
		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
	}

}

/*
 * Enables USB wrapper for GSI
 *
 * @usb_ep - pointer to usb_ep instance.
 */
static void gsi_enable(struct usb_ep *ep)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);

	dwc3_msm_write_reg_field(mdwc->base,
			GSI_GENERAL_CFG_REG, GSI_CLK_EN_MASK, 1);
	dwc3_msm_write_reg_field(mdwc->base,
			GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 1);
	dwc3_msm_write_reg_field(mdwc->base,
			GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 0);
	dev_dbg(mdwc->dev, "%s: Enable GSI\n", __func__);
	dwc3_msm_write_reg_field(mdwc->base,
			GSI_GENERAL_CFG_REG, GSI_EN_MASK, 1);
}

/*
 * Block or allow doorbell towards GSI
 *
 * @usb_ep - pointer to usb_ep instance.
 * @request - pointer to GSI request. In this case num_bufs is used as a bool
 * to set or clear the doorbell bit
 */
static void gsi_set_clear_dbell(struct usb_ep *ep,
					bool block_db)
{

	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);

	dwc3_msm_write_reg_field(mdwc->base,
		GSI_GENERAL_CFG_REG, BLOCK_GSI_WR_GO_MASK, block_db);
}

/*
 * Performs necessary checks before stopping GSI channels
 *
 * @usb_ep - pointer to usb_ep instance to access DWC3 regs
 */
static bool gsi_check_ready_to_suspend(struct usb_ep *ep, bool f_suspend)
{
	u32 timeout = 1500;
	u32 reg = 0;
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);

	while (dwc3_msm_read_reg_field(mdwc->base,
		GSI_IF_STS, GSI_WR_CTRL_STATE_MASK)) {
		if (!timeout--) {
			dev_err(mdwc->dev,
			"Unable to suspend GSI ch. WR_CTRL_STATE != 0\n");
			return false;
		}
	}
	/* Check for U3 only if we are not handling Function Suspend */
	if (!f_suspend) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
		if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U3) {
			dev_err(mdwc->dev, "Unable to suspend GSI ch\n");
			return false;
		}
	}

	return true;
}


/**
 * Performs GSI operations or GSI EP related operations.
 *
 * @usb_ep - pointer to usb_ep instance.
 * @op_data - pointer to opcode related data.
 * @op - GSI related or GSI EP related op code.
 *
 * @return int - 0 on success, negative on error.
 * Also returns XferRscIdx for GSI_EP_OP_GET_XFER_IDX.
 */
static int dwc3_msm_gsi_ep_op(struct usb_ep *ep,
		void *op_data, enum gsi_ep_op op)
{
	u32 ret = 0;
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
	struct usb_gsi_request *request;
	struct gsi_channel_info *ch_info;
	bool block_db, f_suspend;
	unsigned long flags;

	switch (op) {
	case GSI_EP_OP_PREPARE_TRBS:
		request = (struct usb_gsi_request *)op_data;
		dev_dbg(mdwc->dev, "EP_OP_PREPARE_TRBS for %s\n", ep->name);
		ret = gsi_prepare_trbs(ep, request);
		break;
	case GSI_EP_OP_FREE_TRBS:
		dev_dbg(mdwc->dev, "EP_OP_FREE_TRBS for %s\n", ep->name);
		gsi_free_trbs(ep);
		break;
	case GSI_EP_OP_CONFIG:
		request = (struct usb_gsi_request *)op_data;
		dev_dbg(mdwc->dev, "EP_OP_CONFIG for %s\n", ep->name);
		spin_lock_irqsave(&dwc->lock, flags);
		gsi_configure_ep(ep, request);
		spin_unlock_irqrestore(&dwc->lock, flags);
		break;
	case GSI_EP_OP_STARTXFER:
		dev_dbg(mdwc->dev, "EP_OP_STARTXFER for %s\n", ep->name);
		spin_lock_irqsave(&dwc->lock, flags);
		ret = gsi_startxfer_for_ep(ep);
		spin_unlock_irqrestore(&dwc->lock, flags);
		break;
	case GSI_EP_OP_GET_XFER_IDX:
		dev_dbg(mdwc->dev, "EP_OP_GET_XFER_IDX for %s\n", ep->name);
		ret = gsi_get_xfer_index(ep);
		break;
	case GSI_EP_OP_STORE_DBL_INFO:
		dev_dbg(mdwc->dev, "EP_OP_STORE_DBL_INFO\n");
		gsi_store_ringbase_dbl_info(ep, *((u32 *)op_data));
		break;
	case GSI_EP_OP_ENABLE_GSI:
		dev_dbg(mdwc->dev, "EP_OP_ENABLE_GSI\n");
		gsi_enable(ep);
		break;
	case GSI_EP_OP_GET_CH_INFO:
		ch_info = (struct gsi_channel_info *)op_data;
		gsi_get_channel_info(ep, ch_info);
		break;
	case GSI_EP_OP_RING_IN_DB:
		request = (struct usb_gsi_request *)op_data;
		dev_dbg(mdwc->dev, "RING IN EP DB\n");
		gsi_ring_in_db(ep, request);
		break;
	case GSI_EP_OP_UPDATEXFER:
		request = (struct usb_gsi_request *)op_data;
		dev_dbg(mdwc->dev, "EP_OP_UPDATEXFER\n");
		spin_lock_irqsave(&dwc->lock, flags);
		ret = gsi_updatexfer_for_ep(ep, request);
		spin_unlock_irqrestore(&dwc->lock, flags);
		break;
	case GSI_EP_OP_ENDXFER:
		request = (struct usb_gsi_request *)op_data;
		dev_dbg(mdwc->dev, "EP_OP_ENDXFER for %s\n", ep->name);
		spin_lock_irqsave(&dwc->lock, flags);
		gsi_endxfer_for_ep(ep);
		spin_unlock_irqrestore(&dwc->lock, flags);
		break;
	case GSI_EP_OP_SET_CLR_BLOCK_DBL:
		block_db = *((bool *)op_data);
		dev_dbg(mdwc->dev, "EP_OP_SET_CLR_BLOCK_DBL %d\n",
						block_db);
		gsi_set_clear_dbell(ep, block_db);
		break;
	case GSI_EP_OP_CHECK_FOR_SUSPEND:
		dev_dbg(mdwc->dev, "EP_OP_CHECK_FOR_SUSPEND\n");
		f_suspend = *((bool *)op_data);
		ret = gsi_check_ready_to_suspend(ep, f_suspend);
		break;
	case GSI_EP_OP_DISABLE:
		dev_dbg(mdwc->dev, "EP_OP_DISABLE\n");
		ret = ep->ops->disable(ep);
		break;
	default:
		dev_err(mdwc->dev, "%s: Invalid opcode GSI EP\n", __func__);
	}

	return ret;
}

/**
 * Configure an MSM endpoint.
 * This function applies configurations to an endpoint that require
 * an MSM-specific implementation.
 *
 * It should be called by a USB function/class layer that needs support
 * from the MSM-specific hardware wrapping the USB3 core (such as GSI or
 * DBM specific endpoints).
 *
 * @ep - a pointer to some usb_ep instance
 *
 * @return int - 0 on success, negative on error.
 */
int msm_ep_config(struct usb_ep *ep)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
	struct usb_ep_ops *new_ep_ops;


	/* Save original ep ops for future restore */
	if (mdwc->original_ep_ops[dep->number]) {
		dev_err(mdwc->dev,
			"ep [%s,%d] already configured as msm endpoint\n",
			ep->name, dep->number);
		return -EPERM;
	}
	mdwc->original_ep_ops[dep->number] = ep->ops;

	/* Set new usb ops as we like */
	new_ep_ops = kzalloc(sizeof(struct usb_ep_ops), GFP_ATOMIC);
	if (!new_ep_ops)
		return -ENOMEM;

	(*new_ep_ops) = (*ep->ops);
	new_ep_ops->queue = dwc3_msm_ep_queue;
	new_ep_ops->gsi_ep_op = dwc3_msm_gsi_ep_op;
	ep->ops = new_ep_ops;

	/*
	 * Do here any further usb endpoint configuration
	 * which is specific to MSM.
	 */

	return 0;
}
EXPORT_SYMBOL(msm_ep_config);

/**
 * Un-configure an MSM endpoint.
 * Tear down the configuration done in the
 * msm_ep_config function.
 *
 * @ep - a pointer to some usb_ep instance
 *
 * @return int - 0 on success, negative on error.
 */
int msm_ep_unconfig(struct usb_ep *ep)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
	struct usb_ep_ops *old_ep_ops;

	/* Restore original ep ops */
	if (!mdwc->original_ep_ops[dep->number]) {
		dev_err(mdwc->dev,
			"ep [%s,%d] was not configured as msm endpoint\n",
			ep->name, dep->number);
		return -EINVAL;
	}
	old_ep_ops = (struct usb_ep_ops *)ep->ops;
	ep->ops = mdwc->original_ep_ops[dep->number];
	mdwc->original_ep_ops[dep->number] = NULL;
	kfree(old_ep_ops);

	/*
	 * Do here any further usb endpoint un-configuration
	 * which is specific to MSM.
	 */

	return 0;
}
EXPORT_SYMBOL(msm_ep_unconfig);
#endif /* (CONFIG_USB_DWC3_GADGET) || (CONFIG_USB_DWC3_DUAL_ROLE) */

static void dwc3_resume_work(struct work_struct *w);

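/*
 * Worker to restart the active USB session (e.g. after a controller error
 * event): drop the current connection, wait for the controller to runtime
 * suspend, then reconnect if the cable is still attached.
 */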
static void dwc3_restart_usb_work(struct work_struct *w)
{
	struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
						restart_usb_work);
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	unsigned int timeout = 50;

	dev_dbg(mdwc->dev, "%s\n", __func__);

	if (atomic_read(&dwc->in_lpm) || !dwc->is_drd) {
		dev_dbg(mdwc->dev, "%s failed!!!\n", __func__);
		return;
	}

	/* guard against concurrent VBUS handling */
	mdwc->in_restart = true;

	if (!mdwc->vbus_active) {
		dev_dbg(mdwc->dev, "%s bailing out in disconnect\n", __func__);
		dwc->err_evt_seen = false;
		mdwc->in_restart = false;
		return;
	}

	dbg_event(0xFF, "RestartUSB", 0);
	/* Reset active USB connection */
	dwc3_resume_work(&mdwc->resume_work);

	/* Make sure disconnect is processed before sending connect */
	while (--timeout && !pm_runtime_suspended(mdwc->dev))
		msleep(20);

	if (!timeout) {
		dev_dbg(mdwc->dev,
			"Not in LPM after disconnect, forcing suspend...\n");
		dbg_event(0xFF, "ReStart:RT SUSP",
			atomic_read(&mdwc->dev->power.usage_count));
		pm_runtime_suspend(mdwc->dev);
	}

	mdwc->in_restart = false;
	/* Force reconnect only if cable is still connected */
	if (mdwc->vbus_active)
		dwc3_resume_work(&mdwc->resume_work);

	dwc->err_evt_seen = false;
	flush_delayed_work(&mdwc->sm_work);
}

/*
 * Check whether the DWC3 requires resetting the ep
 * after going to Low Power Mode (lpm)
 */
bool msm_dwc3_reset_ep_after_lpm(struct usb_gadget *gadget)
{
	struct dwc3 *dwc = container_of(gadget, struct dwc3, gadget);
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);

	return dbm_reset_ep_after_lpm(mdwc->dbm);
}
EXPORT_SYMBOL(msm_dwc3_reset_ep_after_lpm);

/*
 * Config Global Distributed Switch Controller (GDSC)
 * to support controller power collapse
 */
static int dwc3_msm_config_gdsc(struct dwc3_msm *mdwc, int on)
{
	int ret;

	if (IS_ERR_OR_NULL(mdwc->dwc3_gdsc))
		return -EPERM;

	if (on) {
		ret = regulator_enable(mdwc->dwc3_gdsc);
		if (ret) {
			dev_err(mdwc->dev, "unable to enable usb3 gdsc\n");
			return ret;
		}
	} else {
		ret = regulator_disable(mdwc->dwc3_gdsc);
		if (ret) {
			dev_err(mdwc->dev, "unable to disable usb3 gdsc\n");
			return ret;
		}
	}

	return ret;
}

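/*
 * Assert or deassert the DWC3 core block reset. Clocks and the power event
 * interrupt are disabled while the reset is asserted and re-enabled after
 * deassert.
 */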
static int dwc3_msm_link_clk_reset(struct dwc3_msm *mdwc, bool assert)
{
	int ret = 0;

	if (assert) {
		disable_irq(mdwc->pwr_event_irq);
		/* Using asynchronous block reset to the hardware */
		dev_dbg(mdwc->dev, "block_reset ASSERT\n");
		clk_disable_unprepare(mdwc->utmi_clk);
		clk_disable_unprepare(mdwc->sleep_clk);
		clk_disable_unprepare(mdwc->core_clk);
		clk_disable_unprepare(mdwc->iface_clk);
		ret = reset_control_assert(mdwc->core_reset);
		if (ret)
			dev_err(mdwc->dev, "dwc3 core_reset assert failed\n");
	} else {
		dev_dbg(mdwc->dev, "block_reset DEASSERT\n");
		ret = reset_control_deassert(mdwc->core_reset);
		if (ret)
			dev_err(mdwc->dev, "dwc3 core_reset deassert failed\n");
		ndelay(200);
		clk_prepare_enable(mdwc->iface_clk);
		clk_prepare_enable(mdwc->core_clk);
		clk_prepare_enable(mdwc->sleep_clk);
		clk_prepare_enable(mdwc->utmi_clk);
		enable_irq(mdwc->pwr_event_irq);
	}

	return ret;
}

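/*
 * Program the reference clock period (GUCTL) and frame length adjustment
 * (GFLADJ) registers based on the configured UTMI clock rate.
 */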
static void dwc3_msm_update_ref_clk(struct dwc3_msm *mdwc)
{
	u32 guctl, gfladj = 0;

	guctl = dwc3_msm_read_reg(mdwc->base, DWC3_GUCTL);
	guctl &= ~DWC3_GUCTL_REFCLKPER;

	/* GFLADJ register is used starting with revision 2.50a */
	if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) >= DWC3_REVISION_250A) {
		gfladj = dwc3_msm_read_reg(mdwc->base, DWC3_GFLADJ);
		gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
		gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZ_DECR;
		gfladj &= ~DWC3_GFLADJ_REFCLK_LPM_SEL;
		gfladj &= ~DWC3_GFLADJ_REFCLK_FLADJ;
	}

	/* Refer to SNPS Databook Table 6-55 for calculations used */
	switch (mdwc->utmi_clk_rate) {
	case 19200000:
		guctl |= 52 << __ffs(DWC3_GUCTL_REFCLKPER);
		gfladj |= 12 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
		gfladj |= DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
		gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
		gfladj |= 200 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
		break;
	case 24000000:
		guctl |= 41 << __ffs(DWC3_GUCTL_REFCLKPER);
		gfladj |= 10 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
		gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
		gfladj |= 2032 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
		break;
	default:
		dev_warn(mdwc->dev, "Unsupported utmi_clk_rate: %u\n",
				mdwc->utmi_clk_rate);
		break;
	}

	dwc3_msm_write_reg(mdwc->base, DWC3_GUCTL, guctl);
	if (gfladj)
		dwc3_msm_write_reg(mdwc->base, DWC3_GFLADJ, gfladj);
}

/* Initialize QSCRATCH registers for HSPHY and SSPHY operation */
static void dwc3_msm_qscratch_reg_init(struct dwc3_msm *mdwc)
{
	if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) < DWC3_REVISION_250A)
		/* On older cores set XHCI_REV bit to specify revision 1.0 */
		dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
					 BIT(2), 1);

	/*
	 * Enable master clock for RAMs to allow BAM to access RAMs when
	 * RAM clock gating is enabled via DWC3's GCTL. Otherwise issues
	 * are seen where RAM clocks get turned OFF in SS mode
	 */
	dwc3_msm_write_reg(mdwc->base, CGCTL_REG,
		dwc3_msm_read_reg(mdwc->base, CGCTL_REG) | 0x18);

}

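/* Worker to apply the gadget's negotiated VBUS current draw */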
static void dwc3_msm_vbus_draw_work(struct work_struct *w)
{
	struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
			vbus_draw_work);
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);

	dwc3_msm_gadget_vbus_draw(mdwc, dwc->vbus_draw);
}

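/*
 * Event callback from the dwc3 core driver: handles controller error,
 * reset, connection-done, OTG, vbus-draw and GSI event-buffer events.
 */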
Mayank Rana511f3b22016-08-02 12:00:11 -07001650static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event)
1651{
1652 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
Mayank Ranaf4918d32016-12-15 13:35:55 -08001653 struct dwc3_event_buffer *evt;
Mayank Rana511f3b22016-08-02 12:00:11 -07001654 u32 reg;
Mayank Ranaf4918d32016-12-15 13:35:55 -08001655 int i;
Mayank Rana511f3b22016-08-02 12:00:11 -07001656
1657 switch (event) {
1658 case DWC3_CONTROLLER_ERROR_EVENT:
1659 dev_info(mdwc->dev,
1660 "DWC3_CONTROLLER_ERROR_EVENT received, irq cnt %lu\n",
1661 dwc->irq_cnt);
1662
1663 dwc3_gadget_disable_irq(dwc);
1664
1665 /* prevent core from generating interrupts until recovery */
1666 reg = dwc3_msm_read_reg(mdwc->base, DWC3_GCTL);
1667 reg |= DWC3_GCTL_CORESOFTRESET;
1668 dwc3_msm_write_reg(mdwc->base, DWC3_GCTL, reg);
1669
1670 /* restart USB which performs full reset and reconnect */
1671 schedule_work(&mdwc->restart_usb_work);
1672 break;
1673 case DWC3_CONTROLLER_RESET_EVENT:
1674 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESET_EVENT received\n");
1675 /* HS & SSPHYs get reset as part of core soft reset */
1676 dwc3_msm_qscratch_reg_init(mdwc);
1677 break;
1678 case DWC3_CONTROLLER_POST_RESET_EVENT:
1679 dev_dbg(mdwc->dev,
1680 "DWC3_CONTROLLER_POST_RESET_EVENT received\n");
1681
1682 /*
1683 * Below sequence is used when controller is working without
1684 * having ssphy and only USB high speed is supported.
1685 */
1686 if (dwc->maximum_speed == USB_SPEED_HIGH) {
1687 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1688 dwc3_msm_read_reg(mdwc->base,
1689 QSCRATCH_GENERAL_CFG)
1690 | PIPE_UTMI_CLK_DIS);
1691
1692 usleep_range(2, 5);
1693
1695 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1696 dwc3_msm_read_reg(mdwc->base,
1697 QSCRATCH_GENERAL_CFG)
1698 | PIPE_UTMI_CLK_SEL
1699 | PIPE3_PHYSTATUS_SW);
1700
1701 usleep_range(2, 5);
1702
1703 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1704 dwc3_msm_read_reg(mdwc->base,
1705 QSCRATCH_GENERAL_CFG)
1706 & ~PIPE_UTMI_CLK_DIS);
1707 }
1708
1709 dwc3_msm_update_ref_clk(mdwc);
1710 dwc->tx_fifo_size = mdwc->tx_fifo_size;
1711 break;
1712 case DWC3_CONTROLLER_CONNDONE_EVENT:
1713 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_CONNDONE_EVENT received\n");
1714 /*
1715 * Add power event if the dbm indicates coming out of L1 by
1716 * interrupt
1717 */
1718 if (mdwc->dbm && dbm_l1_lpm_interrupt(mdwc->dbm))
1719 dwc3_msm_write_reg_field(mdwc->base,
1720 PWR_EVNT_IRQ_MASK_REG,
1721 PWR_EVNT_LPM_OUT_L1_MASK, 1);
1722
1723 atomic_set(&dwc->in_lpm, 0);
1724 break;
1725 case DWC3_CONTROLLER_NOTIFY_OTG_EVENT:
1726 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_NOTIFY_OTG_EVENT received\n");
1727 if (dwc->enable_bus_suspend) {
1728 mdwc->suspend = dwc->b_suspend;
1729 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
1730 }
1731 break;
1732 case DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT:
1733 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT received\n");
Jack Pham4b8b4ae2016-08-09 11:36:34 -07001734 schedule_work(&mdwc->vbus_draw_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07001735 break;
1736 case DWC3_CONTROLLER_RESTART_USB_SESSION:
1737 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESTART_USB_SESSION received\n");
Hemant Kumar43874172016-08-25 16:17:48 -07001738 schedule_work(&mdwc->restart_usb_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07001739 break;
Mayank Ranaf4918d32016-12-15 13:35:55 -08001740 case DWC3_GSI_EVT_BUF_ALLOC:
1741 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_ALLOC\n");
1742
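		/*
		 * num_gsi_event_buffers comes from the optional
		 * "qcom,num-gsi-evt-buffs" DT property read at probe; when it
		 * is zero the GSI event buffer path is skipped entirely.
		 */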
1743 if (!mdwc->num_gsi_event_buffers)
1744 break;
1745
1746 mdwc->gsi_ev_buff = devm_kzalloc(dwc->dev,
1747 sizeof(*dwc->ev_buf) * mdwc->num_gsi_event_buffers,
1748 GFP_KERNEL);
1749 if (!mdwc->gsi_ev_buff) {
1750 dev_err(dwc->dev, "can't allocate gsi_ev_buff\n");
1751 break;
1752 }
1753
1754 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1755
1756 evt = devm_kzalloc(dwc->dev, sizeof(*evt), GFP_KERNEL);
1757 if (!evt)
1758 break;
1759 evt->dwc = dwc;
1760 evt->length = DWC3_EVENT_BUFFERS_SIZE;
1761 evt->buf = dma_alloc_coherent(dwc->dev,
1762 DWC3_EVENT_BUFFERS_SIZE,
1763 &evt->dma, GFP_KERNEL);
1764 if (!evt->buf) {
1765 dev_err(dwc->dev,
1766 "can't allocate gsi_evt_buf(%d)\n", i);
1767 break;
1768 }
1769 mdwc->gsi_ev_buff[i] = evt;
1770 }
1771 break;
1772 case DWC3_GSI_EVT_BUF_SETUP:
1773 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_SETUP\n");
1774 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1775 evt = mdwc->gsi_ev_buff[i];
1776 dev_dbg(mdwc->dev, "Evt buf %p dma %08llx length %d\n",
1777 evt->buf, (unsigned long long) evt->dma,
1778 evt->length);
1779 memset(evt->buf, 0, evt->length);
1780 evt->lpos = 0;
1781 /*
1782 * Primary event buffer is programmed with registers
1783 * DWC3_GEVNT*(0). Hence use DWC3_GEVNT*(i+1) to
1784 * program USB GSI related event buffer with DWC3
1785 * controller.
1786 */
1787 dwc3_writel(dwc->regs, DWC3_GEVNTADRLO((i+1)),
1788 lower_32_bits(evt->dma));
1789 dwc3_writel(dwc->regs, DWC3_GEVNTADRHI((i+1)),
1790 DWC3_GEVNTADRHI_EVNTADRHI_GSI_EN(
1791 DWC3_GEVENT_TYPE_GSI) |
1792 DWC3_GEVNTADRHI_EVNTADRHI_GSI_IDX((i+1)));
1793 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ((i+1)),
1794 DWC3_GEVNTCOUNT_EVNTINTRPTMASK |
1795 ((evt->length) & 0xffff));
1796 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT((i+1)), 0);
1797 }
1798 break;
1799 case DWC3_GSI_EVT_BUF_CLEANUP:
1800 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_CLEANUP\n");
1801 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1802 evt = mdwc->gsi_ev_buff[i];
1803 evt->lpos = 0;
1804 /*
1805 * Primary event buffer is programmed with registers
1806 * DWC3_GEVNT*(0). Hence use DWC3_GEVNT*(i+1) to
1807 * program USB GSI related event buffer with DWC3
1808 * controller.
1809 */
1810 dwc3_writel(dwc->regs, DWC3_GEVNTADRLO((i+1)), 0);
1811 dwc3_writel(dwc->regs, DWC3_GEVNTADRHI((i+1)), 0);
1812 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ((i+1)),
1813 DWC3_GEVNTSIZ_INTMASK |
1814 DWC3_GEVNTSIZ_SIZE((i+1)));
1815 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT((i+1)), 0);
1816 }
1817 break;
1818 case DWC3_GSI_EVT_BUF_FREE:
1819 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_FREE\n");
1820 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1821 evt = mdwc->gsi_ev_buff[i];
1822 if (evt)
1823 dma_free_coherent(dwc->dev, evt->length,
1824 evt->buf, evt->dma);
1825 }
1826 break;
Mayank Rana511f3b22016-08-02 12:00:11 -07001827 default:
1828 dev_dbg(mdwc->dev, "unknown dwc3 event\n");
1829 break;
1830 }
1831}
1832
1833static void dwc3_msm_block_reset(struct dwc3_msm *mdwc, bool core_reset)
1834{
1835 int ret = 0;
1836
1837 if (core_reset) {
1838 ret = dwc3_msm_link_clk_reset(mdwc, 1);
1839 if (ret)
1840 return;
1841
1842 usleep_range(1000, 1200);
1843 ret = dwc3_msm_link_clk_reset(mdwc, 0);
1844 if (ret)
1845 return;
1846
1847 usleep_range(10000, 12000);
1848 }
1849
1850 if (mdwc->dbm) {
1851 /* Reset the DBM */
1852 dbm_soft_reset(mdwc->dbm, 1);
1853 usleep_range(1000, 1200);
1854 dbm_soft_reset(mdwc->dbm, 0);
1855
1856 /*enable DBM*/
1857 dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
1858 DBM_EN_MASK, 0x1);
1859 dbm_enable(mdwc->dbm);
1860 }
1861}
1862
1863static void dwc3_msm_power_collapse_por(struct dwc3_msm *mdwc)
1864{
1865 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1866 u32 val;
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05301867 int ret;
Mayank Rana511f3b22016-08-02 12:00:11 -07001868
1869 /* Configure AHB2PHY for one wait state read/write */
1870 if (mdwc->ahb2phy_base) {
1871 clk_prepare_enable(mdwc->cfg_ahb_clk);
1872 val = readl_relaxed(mdwc->ahb2phy_base +
1873 PERIPH_SS_AHB2PHY_TOP_CFG);
1874 if (val != ONE_READ_WRITE_WAIT) {
1875 writel_relaxed(ONE_READ_WRITE_WAIT,
1876 mdwc->ahb2phy_base + PERIPH_SS_AHB2PHY_TOP_CFG);
1877 /* complete above write before configuring USB PHY. */
1878 mb();
1879 }
1880 clk_disable_unprepare(mdwc->cfg_ahb_clk);
1881 }
1882
1883 if (!mdwc->init) {
Mayank Rana08e41922017-03-02 15:25:48 -08001884 dbg_event(0xFF, "dwc3 init",
1885 atomic_read(&mdwc->dev->power.usage_count));
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05301886 ret = dwc3_core_pre_init(dwc);
1887 if (ret) {
1888 dev_err(mdwc->dev, "dwc3_core_pre_init failed\n");
1889 return;
1890 }
Mayank Rana511f3b22016-08-02 12:00:11 -07001891 mdwc->init = true;
1892 }
1893
1894 dwc3_core_init(dwc);
1895 /* Re-configure event buffers */
1896 dwc3_event_buffers_setup(dwc);
1897}
1898
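/*
 * Prepare the controller and HS PHY for low power mode: refuse LPM if the
 * link is not in P3 while SuperSpeed is active, program ENBLSLPM/SUSPHY in
 * GUSB2PHYCFG, then poll for up to ~5 ms for the L2 power event.
 */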
1899static int dwc3_msm_prepare_suspend(struct dwc3_msm *mdwc)
1900{
1901 unsigned long timeout;
1902 u32 reg = 0;
1903
1904 if ((mdwc->in_host_mode || mdwc->vbus_active)
Vijayavardhan Vennapusa8cf91a62016-09-01 12:05:50 +05301905 && dwc3_msm_is_superspeed(mdwc) && !mdwc->in_restart) {
Mayank Rana511f3b22016-08-02 12:00:11 -07001906 if (!atomic_read(&mdwc->in_p3)) {
1907			dev_err(mdwc->dev, "Not in P3, aborting LPM sequence\n");
1908 return -EBUSY;
1909 }
1910 }
1911
1912 /* Clear previous L2 events */
1913 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
1914 PWR_EVNT_LPM_IN_L2_MASK | PWR_EVNT_LPM_OUT_L2_MASK);
1915
1916 /* Prepare HSPHY for suspend */
1917 reg = dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0));
1918 dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
1919 reg | DWC3_GUSB2PHYCFG_ENBLSLPM | DWC3_GUSB2PHYCFG_SUSPHY);
1920
1921 /* Wait for PHY to go into L2 */
1922 timeout = jiffies + msecs_to_jiffies(5);
1923 while (!time_after(jiffies, timeout)) {
1924 reg = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
1925 if (reg & PWR_EVNT_LPM_IN_L2_MASK)
1926 break;
1927 }
1928 if (!(reg & PWR_EVNT_LPM_IN_L2_MASK))
1929 dev_err(mdwc->dev, "could not transition HS PHY to L2\n");
1930
1931 /* Clear L2 event bit */
1932 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
1933 PWR_EVNT_LPM_IN_L2_MASK);
1934
1935 return 0;
1936}
1937
1938static void dwc3_msm_bus_vote_w(struct work_struct *w)
1939{
1940 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, bus_vote_w);
1941 int ret;
1942
1943 ret = msm_bus_scale_client_update_request(mdwc->bus_perf_client,
1944 mdwc->bus_vote);
1945 if (ret)
1946		dev_err(mdwc->dev, "Failed to update bus bw vote %d\n", ret);
1947}
1948
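/*
 * Record the negotiated connection speed in the HS PHY flags (HS/FS vs LS)
 * before the PHYs are suspended: in host mode every enabled root-hub port is
 * inspected, in device mode the gadget's speed is used.
 */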
1949static void dwc3_set_phy_speed_flags(struct dwc3_msm *mdwc)
1950{
1951 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1952 int i, num_ports;
1953 u32 reg;
1954
1955 mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
1956 if (mdwc->in_host_mode) {
1957 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
1958 num_ports = HCS_MAX_PORTS(reg);
1959 for (i = 0; i < num_ports; i++) {
1960 reg = dwc3_msm_read_reg(mdwc->base,
1961 USB3_PORTSC + i*0x10);
1962 if (reg & PORT_PE) {
1963 if (DEV_HIGHSPEED(reg) || DEV_FULLSPEED(reg))
1964 mdwc->hs_phy->flags |= PHY_HSFS_MODE;
1965 else if (DEV_LOWSPEED(reg))
1966 mdwc->hs_phy->flags |= PHY_LS_MODE;
1967 }
1968 }
1969 } else {
1970 if (dwc->gadget.speed == USB_SPEED_HIGH ||
1971 dwc->gadget.speed == USB_SPEED_FULL)
1972 mdwc->hs_phy->flags |= PHY_HSFS_MODE;
1973 else if (dwc->gadget.speed == USB_SPEED_LOW)
1974 mdwc->hs_phy->flags |= PHY_LS_MODE;
1975 }
1976}
1977
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05301978static void msm_dwc3_perf_vote_update(struct dwc3_msm *mdwc,
1979 bool perf_mode);
Mayank Rana511f3b22016-08-02 12:00:11 -07001980
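/*
 * Controller low power mode entry, roughly in order: bail out if device
 * events are pending or an OTG transition is in flight, suspend the PHYs,
 * gate the clocks, optionally power-collapse the controller, drop the bus
 * bandwidth vote and arm the PHY IRQs as wakeup sources.
 */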
1981static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
1982{
Mayank Rana83ad5822016-08-09 14:17:22 -07001983 int ret;
Mayank Rana511f3b22016-08-02 12:00:11 -07001984 bool can_suspend_ssphy;
1985 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana83ad5822016-08-09 14:17:22 -07001986 struct dwc3_event_buffer *evt;
Mayank Rana511f3b22016-08-02 12:00:11 -07001987
1988 if (atomic_read(&dwc->in_lpm)) {
1989 dev_dbg(mdwc->dev, "%s: Already suspended\n", __func__);
1990 return 0;
1991 }
1992
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05301993 cancel_delayed_work_sync(&mdwc->perf_vote_work);
1994 msm_dwc3_perf_vote_update(mdwc, false);
1995
Mayank Rana511f3b22016-08-02 12:00:11 -07001996 if (!mdwc->in_host_mode) {
Mayank Rana83ad5822016-08-09 14:17:22 -07001997 evt = dwc->ev_buf;
1998 if ((evt->flags & DWC3_EVENT_PENDING)) {
1999 dev_dbg(mdwc->dev,
Mayank Rana511f3b22016-08-02 12:00:11 -07002000 "%s: %d device events pending, abort suspend\n",
2001 __func__, evt->count / 4);
Mayank Rana83ad5822016-08-09 14:17:22 -07002002 return -EBUSY;
Mayank Rana511f3b22016-08-02 12:00:11 -07002003 }
2004 }
2005
2006 if (!mdwc->vbus_active && dwc->is_drd &&
2007 mdwc->otg_state == OTG_STATE_B_PERIPHERAL) {
2008 /*
2009 * In some cases, the pm_runtime_suspend may be called by
2010 * usb_bam when there is pending lpm flag. However, if this is
2011 * done when cable was disconnected and otg state has not
2012 * yet changed to IDLE, then it means OTG state machine
2013 * is running and we race against it. So cancel LPM for now,
2014 * and OTG state machine will go for LPM later, after completing
2015 * transition to IDLE state.
2016 */
2017 dev_dbg(mdwc->dev,
2018 "%s: cable disconnected while not in idle otg state\n",
2019 __func__);
2020 return -EBUSY;
2021 }
2022
2023 /*
2024 * Check if device is not in CONFIGURED state
2025 * then check controller state of L2 and break
2026 * LPM sequence. Check this for device bus suspend case.
2027 */
2028 if ((dwc->is_drd && mdwc->otg_state == OTG_STATE_B_SUSPEND) &&
2029 (dwc->gadget.state != USB_STATE_CONFIGURED)) {
2030 pr_err("%s(): Trying to go in LPM with state:%d\n",
2031 __func__, dwc->gadget.state);
2032 pr_err("%s(): LPM is not performed.\n", __func__);
2033 return -EBUSY;
2034 }
2035
2036 ret = dwc3_msm_prepare_suspend(mdwc);
2037 if (ret)
2038 return ret;
2039
2040	/* SS PHY can be suspended unless a SuperSpeed device is active in host mode */
2041 can_suspend_ssphy = !(mdwc->in_host_mode &&
2042 dwc3_msm_is_host_superspeed(mdwc));
2043
2044 /* Disable core irq */
2045 if (dwc->irq)
2046 disable_irq(dwc->irq);
2047
2048 /* disable power event irq, hs and ss phy irq is used as wake up src */
2049 disable_irq(mdwc->pwr_event_irq);
2050
2051 dwc3_set_phy_speed_flags(mdwc);
2052 /* Suspend HS PHY */
2053 usb_phy_set_suspend(mdwc->hs_phy, 1);
2054
2055 /* Suspend SS PHY */
Hemant Kumarde1df692016-04-26 19:36:48 -07002056 if (dwc->maximum_speed == USB_SPEED_SUPER && can_suspend_ssphy) {
Mayank Rana511f3b22016-08-02 12:00:11 -07002057 /* indicate phy about SS mode */
2058 if (dwc3_msm_is_superspeed(mdwc))
2059 mdwc->ss_phy->flags |= DEVICE_IN_SS_MODE;
2060 usb_phy_set_suspend(mdwc->ss_phy, 1);
2061 mdwc->lpm_flags |= MDWC3_SS_PHY_SUSPEND;
2062 }
2063
2064 /* make sure above writes are completed before turning off clocks */
2065 wmb();
2066
2067 /* Disable clocks */
2068 if (mdwc->bus_aggr_clk)
2069 clk_disable_unprepare(mdwc->bus_aggr_clk);
2070 clk_disable_unprepare(mdwc->utmi_clk);
2071
Hemant Kumar633dc332016-08-10 13:41:05 -07002072 /* Memory core: OFF, Memory periphery: OFF */
2073 if (!mdwc->in_host_mode && !mdwc->vbus_active) {
2074 clk_set_flags(mdwc->core_clk, CLKFLAG_NORETAIN_MEM);
2075 clk_set_flags(mdwc->core_clk, CLKFLAG_NORETAIN_PERIPH);
2076 }
2077
Mayank Rana511f3b22016-08-02 12:00:11 -07002078 clk_set_rate(mdwc->core_clk, 19200000);
2079 clk_disable_unprepare(mdwc->core_clk);
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05302080 if (mdwc->noc_aggr_clk)
2081 clk_disable_unprepare(mdwc->noc_aggr_clk);
Mayank Rana511f3b22016-08-02 12:00:11 -07002082 /*
2083 * Disable iface_clk only after core_clk as core_clk has FSM
2084	 * dependency on iface_clk. Hence iface_clk should be turned off
2085 * after core_clk is turned off.
2086 */
2087 clk_disable_unprepare(mdwc->iface_clk);
2088 /* USB PHY no more requires TCXO */
2089 clk_disable_unprepare(mdwc->xo_clk);
2090
2091 /* Perform controller power collapse */
Azhar Shaikh69f4c052016-02-11 11:00:58 -08002092 if (!mdwc->in_host_mode && (!mdwc->vbus_active || mdwc->in_restart)) {
Mayank Rana511f3b22016-08-02 12:00:11 -07002093 mdwc->lpm_flags |= MDWC3_POWER_COLLAPSE;
2094 dev_dbg(mdwc->dev, "%s: power collapse\n", __func__);
2095 dwc3_msm_config_gdsc(mdwc, 0);
2096 clk_disable_unprepare(mdwc->sleep_clk);
Jack Phambbe27962017-03-23 18:42:26 -07002097
2098 if (mdwc->iommu_map)
2099 arm_iommu_detach_device(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07002100 }
2101
2102 /* Remove bus voting */
2103 if (mdwc->bus_perf_client) {
2104 mdwc->bus_vote = 0;
2105 schedule_work(&mdwc->bus_vote_w);
2106 }
2107
2108 /*
2109	 * Release the wakeup source with a timeout to defer system suspend;
2110	 * this handles the case where both SUSPEND and DISCONNECT events
2111	 * arrive on a USB cable disconnect.
2112 */
2113 if (mdwc->lpm_to_suspend_delay) {
2114 dev_dbg(mdwc->dev, "defer suspend with %d(msecs)\n",
2115 mdwc->lpm_to_suspend_delay);
2116 pm_wakeup_event(mdwc->dev, mdwc->lpm_to_suspend_delay);
2117 } else {
2118 pm_relax(mdwc->dev);
2119 }
2120
2121 atomic_set(&dwc->in_lpm, 1);
2122
2123 /*
2124	 * With DCP or during cable disconnect, we don't require wakeup
2125 * using HS_PHY_IRQ or SS_PHY_IRQ. Hence enable wakeup only in
2126 * case of host bus suspend and device bus suspend.
2127 */
2128 if (mdwc->vbus_active || mdwc->in_host_mode) {
2129 enable_irq_wake(mdwc->hs_phy_irq);
2130 enable_irq(mdwc->hs_phy_irq);
2131 if (mdwc->ss_phy_irq) {
2132 enable_irq_wake(mdwc->ss_phy_irq);
2133 enable_irq(mdwc->ss_phy_irq);
2134 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002135 mdwc->lpm_flags |= MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
2136 }
2137
2138 dev_info(mdwc->dev, "DWC3 in low power mode\n");
2139 return 0;
2140}
2141
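/*
 * Mirror image of dwc3_msm_suspend(): restore the bus vote and clocks, wake
 * the PHYs, redo power-on reset if the controller was power-collapsed, then
 * re-enable interrupts and service any pending power events.
 */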
2142static int dwc3_msm_resume(struct dwc3_msm *mdwc)
2143{
2144 int ret;
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08002145 long core_clk_rate;
Mayank Rana511f3b22016-08-02 12:00:11 -07002146 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2147
2148 dev_dbg(mdwc->dev, "%s: exiting lpm\n", __func__);
2149
2150 if (!atomic_read(&dwc->in_lpm)) {
2151 dev_dbg(mdwc->dev, "%s: Already resumed\n", __func__);
2152 return 0;
2153 }
2154
2155 pm_stay_awake(mdwc->dev);
2156
2157 /* Enable bus voting */
2158 if (mdwc->bus_perf_client) {
2159 mdwc->bus_vote = 1;
2160 schedule_work(&mdwc->bus_vote_w);
2161 }
2162
2163 /* Vote for TCXO while waking up USB HSPHY */
2164 ret = clk_prepare_enable(mdwc->xo_clk);
2165 if (ret)
2166 dev_err(mdwc->dev, "%s failed to vote TCXO buffer%d\n",
2167 __func__, ret);
2168
2169 /* Restore controller power collapse */
2170 if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
2171 dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
2172 dwc3_msm_config_gdsc(mdwc, 1);
Amit Nischal4d278212016-06-06 17:54:34 +05302173 ret = reset_control_assert(mdwc->core_reset);
2174 if (ret)
2175 dev_err(mdwc->dev, "%s:core_reset assert failed\n",
2176 __func__);
Mayank Rana511f3b22016-08-02 12:00:11 -07002177 /* HW requires a short delay for reset to take place properly */
2178 usleep_range(1000, 1200);
Amit Nischal4d278212016-06-06 17:54:34 +05302179 ret = reset_control_deassert(mdwc->core_reset);
2180 if (ret)
2181 dev_err(mdwc->dev, "%s:core_reset deassert failed\n",
2182 __func__);
Mayank Rana511f3b22016-08-02 12:00:11 -07002183 clk_prepare_enable(mdwc->sleep_clk);
2184 }
2185
2186 /*
2187 * Enable clocks
2188	 * Turn ON iface_clk before core_clk due to the FSM dependency.
2189 */
2190 clk_prepare_enable(mdwc->iface_clk);
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05302191 if (mdwc->noc_aggr_clk)
2192 clk_prepare_enable(mdwc->noc_aggr_clk);
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08002193
2194 core_clk_rate = mdwc->core_clk_rate;
2195 if (mdwc->in_host_mode && mdwc->max_rh_port_speed == USB_SPEED_HIGH) {
2196 core_clk_rate = mdwc->core_clk_rate_hs;
2197 dev_dbg(mdwc->dev, "%s: set hs core clk rate %ld\n", __func__,
2198 core_clk_rate);
2199 }
2200
2201 clk_set_rate(mdwc->core_clk, core_clk_rate);
Mayank Rana511f3b22016-08-02 12:00:11 -07002202 clk_prepare_enable(mdwc->core_clk);
Hemant Kumar5fa38932016-10-27 11:58:37 -07002203
2204 /* set Memory core: ON, Memory periphery: ON */
2205 clk_set_flags(mdwc->core_clk, CLKFLAG_RETAIN_MEM);
2206 clk_set_flags(mdwc->core_clk, CLKFLAG_RETAIN_PERIPH);
2207
Mayank Rana511f3b22016-08-02 12:00:11 -07002208 clk_prepare_enable(mdwc->utmi_clk);
2209 if (mdwc->bus_aggr_clk)
2210 clk_prepare_enable(mdwc->bus_aggr_clk);
2211
2212 /* Resume SS PHY */
Hemant Kumarde1df692016-04-26 19:36:48 -07002213 if (dwc->maximum_speed == USB_SPEED_SUPER &&
2214 mdwc->lpm_flags & MDWC3_SS_PHY_SUSPEND) {
Mayank Rana511f3b22016-08-02 12:00:11 -07002215 mdwc->ss_phy->flags &= ~(PHY_LANE_A | PHY_LANE_B);
2216 if (mdwc->typec_orientation == ORIENTATION_CC1)
2217 mdwc->ss_phy->flags |= PHY_LANE_A;
2218 if (mdwc->typec_orientation == ORIENTATION_CC2)
2219 mdwc->ss_phy->flags |= PHY_LANE_B;
2220 usb_phy_set_suspend(mdwc->ss_phy, 0);
2221 mdwc->ss_phy->flags &= ~DEVICE_IN_SS_MODE;
2222 mdwc->lpm_flags &= ~MDWC3_SS_PHY_SUSPEND;
2223 }
2224
2225 mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
2226 /* Resume HS PHY */
2227 usb_phy_set_suspend(mdwc->hs_phy, 0);
2228
2229 /* Recover from controller power collapse */
2230 if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
2231 u32 tmp;
2232
2233 dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
2234
2235 dwc3_msm_power_collapse_por(mdwc);
2236
2237 /* Get initial P3 status and enable IN_P3 event */
2238 tmp = dwc3_msm_read_reg_field(mdwc->base,
2239 DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
2240 atomic_set(&mdwc->in_p3, tmp == DWC3_LINK_STATE_U3);
2241 dwc3_msm_write_reg_field(mdwc->base, PWR_EVNT_IRQ_MASK_REG,
2242 PWR_EVNT_POWERDOWN_IN_P3_MASK, 1);
2243
2244 mdwc->lpm_flags &= ~MDWC3_POWER_COLLAPSE;
Jack Phambbe27962017-03-23 18:42:26 -07002245
2246 if (mdwc->iommu_map) {
2247 ret = arm_iommu_attach_device(mdwc->dev,
2248 mdwc->iommu_map);
2249 if (ret)
2250 dev_err(mdwc->dev, "IOMMU attach failed (%d)\n",
2251 ret);
2252 else
2253 dev_dbg(mdwc->dev, "attached to IOMMU\n");
2254 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002255 }
2256
2257 atomic_set(&dwc->in_lpm, 0);
2258
Vijayavardhan Vennapusa6a4c1d92016-12-08 13:06:26 +05302259 /* enable power evt irq for IN P3 detection */
2260 enable_irq(mdwc->pwr_event_irq);
2261
Mayank Rana511f3b22016-08-02 12:00:11 -07002262 /* Disable HSPHY auto suspend */
2263 dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
2264 dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0)) &
2265 ~(DWC3_GUSB2PHYCFG_ENBLSLPM |
2266 DWC3_GUSB2PHYCFG_SUSPHY));
2267
2268 /* Disable wakeup capable for HS_PHY IRQ & SS_PHY_IRQ if enabled */
2269 if (mdwc->lpm_flags & MDWC3_ASYNC_IRQ_WAKE_CAPABILITY) {
2270 disable_irq_wake(mdwc->hs_phy_irq);
2271 disable_irq_nosync(mdwc->hs_phy_irq);
2272 if (mdwc->ss_phy_irq) {
2273 disable_irq_wake(mdwc->ss_phy_irq);
2274 disable_irq_nosync(mdwc->ss_phy_irq);
2275 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002276 mdwc->lpm_flags &= ~MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
2277 }
2278
2279 dev_info(mdwc->dev, "DWC3 exited from low power mode\n");
2280
Mayank Rana511f3b22016-08-02 12:00:11 -07002281 /* Enable core irq */
2282 if (dwc->irq)
2283 enable_irq(dwc->irq);
2284
2285 /*
2286 * Handle other power events that could not have been handled during
2287 * Low Power Mode
2288 */
2289 dwc3_pwr_event_handler(mdwc);
2290
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05302291 if (pm_qos_request_active(&mdwc->pm_qos_req_dma))
2292 schedule_delayed_work(&mdwc->perf_vote_work,
2293 msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
2294
Mayank Rana08e41922017-03-02 15:25:48 -08002295 dbg_event(0xFF, "Ctl Res", atomic_read(&dwc->in_lpm));
Mayank Rana511f3b22016-08-02 12:00:11 -07002296 return 0;
2297}
2298
2299/**
2300 * dwc3_ext_event_notify - callback to handle events from external transceiver
2301 *
2302 * Updates the ID/BSV/SUSPEND inputs and kicks the OTG state machine.
2303 */
2304static void dwc3_ext_event_notify(struct dwc3_msm *mdwc)
2305{
2306 /* Flush processing any pending events before handling new ones */
2307 flush_delayed_work(&mdwc->sm_work);
2308
2309 if (mdwc->id_state == DWC3_ID_FLOAT) {
2310 dev_dbg(mdwc->dev, "XCVR: ID set\n");
2311 set_bit(ID, &mdwc->inputs);
2312 } else {
2313 dev_dbg(mdwc->dev, "XCVR: ID clear\n");
2314 clear_bit(ID, &mdwc->inputs);
2315 }
2316
2317 if (mdwc->vbus_active && !mdwc->in_restart) {
2318 dev_dbg(mdwc->dev, "XCVR: BSV set\n");
2319 set_bit(B_SESS_VLD, &mdwc->inputs);
2320 } else {
2321 dev_dbg(mdwc->dev, "XCVR: BSV clear\n");
2322 clear_bit(B_SESS_VLD, &mdwc->inputs);
2323 }
2324
2325 if (mdwc->suspend) {
2326 dev_dbg(mdwc->dev, "XCVR: SUSP set\n");
2327 set_bit(B_SUSPEND, &mdwc->inputs);
2328 } else {
2329 dev_dbg(mdwc->dev, "XCVR: SUSP clear\n");
2330 clear_bit(B_SUSPEND, &mdwc->inputs);
2331 }
2332
2333 schedule_delayed_work(&mdwc->sm_work, 0);
2334}
2335
2336static void dwc3_resume_work(struct work_struct *w)
2337{
2338 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, resume_work);
Mayank Rana08e41922017-03-02 15:25:48 -08002339 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07002340
2341 dev_dbg(mdwc->dev, "%s: dwc3 resume work\n", __func__);
2342
2343 /*
2344 * exit LPM first to meet resume timeline from device side.
2345 * resume_pending flag would prevent calling
2346 * dwc3_msm_resume() in case we are here due to system
2347 * wide resume without usb cable connected. This flag is set
2348 * only in case of power event irq in lpm.
2349 */
2350 if (mdwc->resume_pending) {
2351 dwc3_msm_resume(mdwc);
2352 mdwc->resume_pending = false;
2353 }
2354
Mayank Rana08e41922017-03-02 15:25:48 -08002355 if (atomic_read(&mdwc->pm_suspended)) {
2356 dbg_event(0xFF, "RWrk PMSus", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07002357 /* let pm resume kick in resume work later */
2358 return;
Mayank Rana08e41922017-03-02 15:25:48 -08002359 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002360 dwc3_ext_event_notify(mdwc);
2361}
2362
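/*
 * Decode PWR_EVNT_IRQ_STAT: track P3 entry/exit (falling back to the link
 * state when both bits are set), issue remote wakeup on L1 exit, and ack
 * only the bits that were actually handled.
 */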
2363static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc)
2364{
2365 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2366 u32 irq_stat, irq_clear = 0;
2367
2368 irq_stat = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
2369 dev_dbg(mdwc->dev, "%s irq_stat=%X\n", __func__, irq_stat);
2370
2371 /* Check for P3 events */
2372 if ((irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) &&
2373 (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK)) {
2374 /* Can't tell if entered or exit P3, so check LINKSTATE */
2375 u32 ls = dwc3_msm_read_reg_field(mdwc->base,
2376 DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
2377 dev_dbg(mdwc->dev, "%s link state = 0x%04x\n", __func__, ls);
2378 atomic_set(&mdwc->in_p3, ls == DWC3_LINK_STATE_U3);
2379
2380 irq_stat &= ~(PWR_EVNT_POWERDOWN_OUT_P3_MASK |
2381 PWR_EVNT_POWERDOWN_IN_P3_MASK);
2382 irq_clear |= (PWR_EVNT_POWERDOWN_OUT_P3_MASK |
2383 PWR_EVNT_POWERDOWN_IN_P3_MASK);
2384 } else if (irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) {
2385 atomic_set(&mdwc->in_p3, 0);
2386 irq_stat &= ~PWR_EVNT_POWERDOWN_OUT_P3_MASK;
2387 irq_clear |= PWR_EVNT_POWERDOWN_OUT_P3_MASK;
2388 } else if (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK) {
2389 atomic_set(&mdwc->in_p3, 1);
2390 irq_stat &= ~PWR_EVNT_POWERDOWN_IN_P3_MASK;
2391 irq_clear |= PWR_EVNT_POWERDOWN_IN_P3_MASK;
2392 }
2393
2394 /* Clear L2 exit */
2395 if (irq_stat & PWR_EVNT_LPM_OUT_L2_MASK) {
2396 irq_stat &= ~PWR_EVNT_LPM_OUT_L2_MASK;
2397		irq_clear |= PWR_EVNT_LPM_OUT_L2_MASK;
2398 }
2399
2400 /* Handle exit from L1 events */
2401 if (irq_stat & PWR_EVNT_LPM_OUT_L1_MASK) {
2402 dev_dbg(mdwc->dev, "%s: handling PWR_EVNT_LPM_OUT_L1_MASK\n",
2403 __func__);
2404 if (usb_gadget_wakeup(&dwc->gadget))
2405 dev_err(mdwc->dev, "%s failed to take dwc out of L1\n",
2406 __func__);
2407 irq_stat &= ~PWR_EVNT_LPM_OUT_L1_MASK;
2408 irq_clear |= PWR_EVNT_LPM_OUT_L1_MASK;
2409 }
2410
2411 /* Unhandled events */
2412 if (irq_stat)
2413 dev_dbg(mdwc->dev, "%s: unexpected PWR_EVNT, irq_stat=%X\n",
2414 __func__, irq_stat);
2415
2416 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG, irq_clear);
2417}
2418
2419static irqreturn_t msm_dwc3_pwr_irq_thread(int irq, void *_mdwc)
2420{
2421 struct dwc3_msm *mdwc = _mdwc;
2422 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2423
2424 dev_dbg(mdwc->dev, "%s\n", __func__);
2425
2426 if (atomic_read(&dwc->in_lpm))
2427 dwc3_resume_work(&mdwc->resume_work);
2428 else
2429 dwc3_pwr_event_handler(mdwc);
2430
Mayank Rana08e41922017-03-02 15:25:48 -08002431 dbg_event(0xFF, "PWR IRQ", atomic_read(&dwc->in_lpm));
Mayank Rana511f3b22016-08-02 12:00:11 -07002432 return IRQ_HANDLED;
2433}
2434
2435static irqreturn_t msm_dwc3_pwr_irq(int irq, void *data)
2436{
2437 struct dwc3_msm *mdwc = data;
2438 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2439
2440 dwc->t_pwr_evt_irq = ktime_get();
2441 dev_dbg(mdwc->dev, "%s received\n", __func__);
2442 /*
2443	 * When in Low Power Mode, can't read PWR_EVNT_IRQ_STAT_REG to ascertain
2444	 * which interrupts have been triggered, as the clocks are disabled.
2445	 * Resume the controller by waking up the pwr event irq thread. After re-enabling
2446 * clocks, dwc3_msm_resume will call dwc3_pwr_event_handler to handle
2447 * all other power events.
2448 */
2449 if (atomic_read(&dwc->in_lpm)) {
2450 /* set this to call dwc3_msm_resume() */
2451 mdwc->resume_pending = true;
2452 return IRQ_WAKE_THREAD;
2453 }
2454
2455 dwc3_pwr_event_handler(mdwc);
2456 return IRQ_HANDLED;
2457}
2458
2459static int dwc3_cpu_notifier_cb(struct notifier_block *nfb,
2460 unsigned long action, void *hcpu)
2461{
2462 uint32_t cpu = (uintptr_t)hcpu;
2463 struct dwc3_msm *mdwc =
2464 container_of(nfb, struct dwc3_msm, dwc3_cpu_notifier);
2465
2466 if (cpu == cpu_to_affin && action == CPU_ONLINE) {
2467 pr_debug("%s: cpu online:%u irq:%d\n", __func__,
2468 cpu_to_affin, mdwc->irq_to_affin);
2469 irq_set_affinity(mdwc->irq_to_affin, get_cpu_mask(cpu));
2470 }
2471
2472 return NOTIFY_OK;
2473}
2474
2475static void dwc3_otg_sm_work(struct work_struct *w);
2476
2477static int dwc3_msm_get_clk_gdsc(struct dwc3_msm *mdwc)
2478{
2479 int ret;
2480
2481 mdwc->dwc3_gdsc = devm_regulator_get(mdwc->dev, "USB3_GDSC");
2482 if (IS_ERR(mdwc->dwc3_gdsc))
2483 mdwc->dwc3_gdsc = NULL;
2484
2485 mdwc->xo_clk = devm_clk_get(mdwc->dev, "xo");
2486 if (IS_ERR(mdwc->xo_clk)) {
2487 dev_err(mdwc->dev, "%s unable to get TCXO buffer handle\n",
2488 __func__);
2489 ret = PTR_ERR(mdwc->xo_clk);
2490 return ret;
2491 }
2492 clk_set_rate(mdwc->xo_clk, 19200000);
2493
2494 mdwc->iface_clk = devm_clk_get(mdwc->dev, "iface_clk");
2495 if (IS_ERR(mdwc->iface_clk)) {
2496 dev_err(mdwc->dev, "failed to get iface_clk\n");
2497 ret = PTR_ERR(mdwc->iface_clk);
2498 return ret;
2499 }
2500
2501 /*
2502 * DWC3 Core requires its CORE CLK (aka master / bus clk) to
2503	 * run at 125 MHz in SSUSB mode and >60 MHz in HSUSB mode.
2504	 * On newer platforms it can run at 150 MHz as well.
2505 */
2506 mdwc->core_clk = devm_clk_get(mdwc->dev, "core_clk");
2507 if (IS_ERR(mdwc->core_clk)) {
2508 dev_err(mdwc->dev, "failed to get core_clk\n");
2509 ret = PTR_ERR(mdwc->core_clk);
2510 return ret;
2511 }
2512
Amit Nischal4d278212016-06-06 17:54:34 +05302513 mdwc->core_reset = devm_reset_control_get(mdwc->dev, "core_reset");
2514 if (IS_ERR(mdwc->core_reset)) {
2515 dev_err(mdwc->dev, "failed to get core_reset\n");
2516 return PTR_ERR(mdwc->core_reset);
2517 }
2518
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05302519 if (of_property_read_u32(mdwc->dev->of_node, "qcom,core-clk-rate",
Vijayavardhan Vennapusa3e668f32016-01-08 15:58:35 +05302520 (u32 *)&mdwc->core_clk_rate)) {
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05302521 dev_err(mdwc->dev, "USB core-clk-rate is not present\n");
2522 return -EINVAL;
Vijayavardhan Vennapusa3e668f32016-01-08 15:58:35 +05302523 }
2524
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05302525 mdwc->core_clk_rate = clk_round_rate(mdwc->core_clk,
Vijayavardhan Vennapusa3e668f32016-01-08 15:58:35 +05302526 mdwc->core_clk_rate);
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05302527 dev_dbg(mdwc->dev, "USB core frequency = %ld\n",
2528 mdwc->core_clk_rate);
2529 ret = clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
2530 if (ret)
2531 dev_err(mdwc->dev, "fail to set core_clk freq:%d\n", ret);
Mayank Rana511f3b22016-08-02 12:00:11 -07002532
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08002533 if (of_property_read_u32(mdwc->dev->of_node, "qcom,core-clk-rate-hs",
2534 (u32 *)&mdwc->core_clk_rate_hs)) {
2535 dev_dbg(mdwc->dev, "USB core-clk-rate-hs is not present\n");
2536 mdwc->core_clk_rate_hs = mdwc->core_clk_rate;
2537 }
2538
Mayank Rana511f3b22016-08-02 12:00:11 -07002539 mdwc->sleep_clk = devm_clk_get(mdwc->dev, "sleep_clk");
2540 if (IS_ERR(mdwc->sleep_clk)) {
2541 dev_err(mdwc->dev, "failed to get sleep_clk\n");
2542 ret = PTR_ERR(mdwc->sleep_clk);
2543 return ret;
2544 }
2545
2546 clk_set_rate(mdwc->sleep_clk, 32000);
2547 mdwc->utmi_clk_rate = 19200000;
2548 mdwc->utmi_clk = devm_clk_get(mdwc->dev, "utmi_clk");
2549 if (IS_ERR(mdwc->utmi_clk)) {
2550 dev_err(mdwc->dev, "failed to get utmi_clk\n");
2551 ret = PTR_ERR(mdwc->utmi_clk);
2552 return ret;
2553 }
2554
2555 clk_set_rate(mdwc->utmi_clk, mdwc->utmi_clk_rate);
2556 mdwc->bus_aggr_clk = devm_clk_get(mdwc->dev, "bus_aggr_clk");
2557 if (IS_ERR(mdwc->bus_aggr_clk))
2558 mdwc->bus_aggr_clk = NULL;
2559
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05302560 mdwc->noc_aggr_clk = devm_clk_get(mdwc->dev, "noc_aggr_clk");
2561 if (IS_ERR(mdwc->noc_aggr_clk))
2562 mdwc->noc_aggr_clk = NULL;
2563
Mayank Rana511f3b22016-08-02 12:00:11 -07002564 if (of_property_match_string(mdwc->dev->of_node,
2565 "clock-names", "cfg_ahb_clk") >= 0) {
2566 mdwc->cfg_ahb_clk = devm_clk_get(mdwc->dev, "cfg_ahb_clk");
2567 if (IS_ERR(mdwc->cfg_ahb_clk)) {
2568 ret = PTR_ERR(mdwc->cfg_ahb_clk);
2569 mdwc->cfg_ahb_clk = NULL;
2570 if (ret != -EPROBE_DEFER)
2571 dev_err(mdwc->dev,
2572 "failed to get cfg_ahb_clk ret %d\n",
2573 ret);
2574 return ret;
2575 }
2576 }
2577
2578 return 0;
2579}
2580
2581static int dwc3_msm_id_notifier(struct notifier_block *nb,
2582 unsigned long event, void *ptr)
2583{
2584 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, id_nb);
Hemant Kumarde1df692016-04-26 19:36:48 -07002585 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07002586 struct extcon_dev *edev = ptr;
2587 enum dwc3_id_state id;
2588 int cc_state;
Hemant Kumarde1df692016-04-26 19:36:48 -07002589 int speed;
Mayank Rana511f3b22016-08-02 12:00:11 -07002590
2591 if (!edev) {
2592 dev_err(mdwc->dev, "%s: edev null\n", __func__);
2593 goto done;
2594 }
2595
2596 id = event ? DWC3_ID_GROUND : DWC3_ID_FLOAT;
2597
2598 dev_dbg(mdwc->dev, "host:%ld (id:%d) event received\n", event, id);
2599
2600 cc_state = extcon_get_cable_state_(edev, EXTCON_USB_CC);
2601 if (cc_state < 0)
2602 mdwc->typec_orientation = ORIENTATION_NONE;
2603 else
2604 mdwc->typec_orientation =
2605 cc_state ? ORIENTATION_CC2 : ORIENTATION_CC1;
2606
Mayank Rana08e41922017-03-02 15:25:48 -08002607 dbg_event(0xFF, "cc_state", mdwc->typec_orientation);
Hemant Kumarde1df692016-04-26 19:36:48 -07002608
2609 speed = extcon_get_cable_state_(edev, EXTCON_USB_SPEED);
Vijayavardhan Vennapusa1965ae982017-01-16 13:35:17 +05302610 /* Use default dwc->maximum_speed if extcon doesn't report speed. */
2611 if (speed >= 0)
2612 dwc->maximum_speed =
2613 (speed == 0) ? USB_SPEED_HIGH : USB_SPEED_SUPER;
2614
Vamsi Krishna Samavedam86ed20b2017-01-31 13:55:38 -08002615 if (dwc->maximum_speed > dwc->max_hw_supp_speed)
2616 dwc->maximum_speed = dwc->max_hw_supp_speed;
Hemant Kumarde1df692016-04-26 19:36:48 -07002617
Mayank Rana511f3b22016-08-02 12:00:11 -07002618 if (mdwc->id_state != id) {
2619 mdwc->id_state = id;
Mayank Rana08e41922017-03-02 15:25:48 -08002620 dbg_event(0xFF, "id_state", mdwc->id_state);
Mayank Rana511f3b22016-08-02 12:00:11 -07002621 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
2622 }
2623
2624done:
2625 return NOTIFY_DONE;
2626}
2627
2628static int dwc3_msm_vbus_notifier(struct notifier_block *nb,
2629 unsigned long event, void *ptr)
2630{
2631 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, vbus_nb);
2632 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2633 struct extcon_dev *edev = ptr;
2634 int cc_state;
Hemant Kumarde1df692016-04-26 19:36:48 -07002635 int speed;
Mayank Rana511f3b22016-08-02 12:00:11 -07002636
2637 if (!edev) {
2638 dev_err(mdwc->dev, "%s: edev null\n", __func__);
2639 goto done;
2640 }
2641
2642 dev_dbg(mdwc->dev, "vbus:%ld event received\n", event);
2643
2644 if (mdwc->vbus_active == event)
2645 return NOTIFY_DONE;
2646
2647 cc_state = extcon_get_cable_state_(edev, EXTCON_USB_CC);
2648 if (cc_state < 0)
2649 mdwc->typec_orientation = ORIENTATION_NONE;
2650 else
2651 mdwc->typec_orientation =
2652 cc_state ? ORIENTATION_CC2 : ORIENTATION_CC1;
2653
Mayank Rana08e41922017-03-02 15:25:48 -08002654 dbg_event(0xFF, "cc_state", mdwc->typec_orientation);
Hemant Kumarde1df692016-04-26 19:36:48 -07002655
2656 speed = extcon_get_cable_state_(edev, EXTCON_USB_SPEED);
Vijayavardhan Vennapusa1965ae982017-01-16 13:35:17 +05302657 /* Use default dwc->maximum_speed if extcon doesn't report speed. */
2658 if (speed >= 0)
2659 dwc->maximum_speed =
2660 (speed == 0) ? USB_SPEED_HIGH : USB_SPEED_SUPER;
2661
Vamsi Krishna Samavedam86ed20b2017-01-31 13:55:38 -08002662 if (dwc->maximum_speed > dwc->max_hw_supp_speed)
2663 dwc->maximum_speed = dwc->max_hw_supp_speed;
Hemant Kumarde1df692016-04-26 19:36:48 -07002664
Mayank Rana511f3b22016-08-02 12:00:11 -07002665 mdwc->vbus_active = event;
Mayank Rana83ad5822016-08-09 14:17:22 -07002666 if (dwc->is_drd && !mdwc->in_restart)
Mayank Rana511f3b22016-08-02 12:00:11 -07002667 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07002668done:
2669 return NOTIFY_DONE;
2670}
Mayank Rana51958172017-02-28 14:49:21 -08002671/*
2672 * Handle EUD based soft detach/attach event, and force USB high speed mode
2673 * functionality on receiving soft attach event.
2674 *
2675 * @nb - notifier handler
2676 * @event - event information i.e. soft detach/attach event
2677 * @ptr - extcon_dev pointer
2678 *
2679 * @return NOTIFY_DONE always
2680 */
2681static int dwc3_msm_eud_notifier(struct notifier_block *nb,
2682 unsigned long event, void *ptr)
2683{
2684 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, eud_event_nb);
2685 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2686 struct extcon_dev *edev = ptr;
2687
2688 if (!edev) {
2689 dev_err(mdwc->dev, "%s: edev null\n", __func__);
2690 goto done;
2691 }
2692
2693 dbg_event(0xFF, "EUD_NB", event);
2694 dev_dbg(mdwc->dev, "eud:%ld event received\n", event);
2695 if (mdwc->vbus_active == event)
2696 return NOTIFY_DONE;
2697
2698 /* Force USB High-Speed enumeration Only */
2699 dwc->maximum_speed = USB_SPEED_HIGH;
2700 dbg_event(0xFF, "Speed", dwc->maximum_speed);
2701 mdwc->vbus_active = event;
2702 if (dwc->is_drd && !mdwc->in_restart)
2703 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
2704done:
2705 return NOTIFY_DONE;
2706}
Mayank Rana511f3b22016-08-02 12:00:11 -07002707
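/*
 * Illustrative extcon wiring in DT (hypothetical node names), matching the
 * phandle order parsed below; only the first phandle is mandatory:
 *
 *     extcon = <&usb_vbus_edev>, <&usb_id_edev>, <&eud>;
 */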
2708static int dwc3_msm_extcon_register(struct dwc3_msm *mdwc)
2709{
2710 struct device_node *node = mdwc->dev->of_node;
2711 struct extcon_dev *edev;
2712 int ret = 0;
2713
2714 if (!of_property_read_bool(node, "extcon"))
2715 return 0;
2716
Mayank Rana51958172017-02-28 14:49:21 -08002717 /* Use first phandle (mandatory) for USB vbus status notification */
Mayank Rana511f3b22016-08-02 12:00:11 -07002718 edev = extcon_get_edev_by_phandle(mdwc->dev, 0);
2719 if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV)
2720 return PTR_ERR(edev);
2721
2722 if (!IS_ERR(edev)) {
2723 mdwc->extcon_vbus = edev;
2724 mdwc->vbus_nb.notifier_call = dwc3_msm_vbus_notifier;
2725 ret = extcon_register_notifier(edev, EXTCON_USB,
2726 &mdwc->vbus_nb);
2727 if (ret < 0) {
2728 dev_err(mdwc->dev, "failed to register notifier for USB\n");
2729 return ret;
2730 }
2731 }
2732
Mayank Rana51958172017-02-28 14:49:21 -08002733 /* Use second phandle (optional) for USB ID status notification */
Mayank Rana511f3b22016-08-02 12:00:11 -07002734 if (of_count_phandle_with_args(node, "extcon", NULL) > 1) {
2735 edev = extcon_get_edev_by_phandle(mdwc->dev, 1);
2736 if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) {
2737 ret = PTR_ERR(edev);
2738 goto err;
2739 }
2740 }
2741
2742 if (!IS_ERR(edev)) {
2743 mdwc->extcon_id = edev;
2744 mdwc->id_nb.notifier_call = dwc3_msm_id_notifier;
2745 ret = extcon_register_notifier(edev, EXTCON_USB_HOST,
2746 &mdwc->id_nb);
2747 if (ret < 0) {
2748 dev_err(mdwc->dev, "failed to register notifier for USB-HOST\n");
2749 goto err;
2750 }
2751 }
2752
Mayank Rana51958172017-02-28 14:49:21 -08002753 /* Use third phandle (optional) for EUD based detach/attach events */
2754 if (of_count_phandle_with_args(node, "extcon", NULL) > 2) {
2755 edev = extcon_get_edev_by_phandle(mdwc->dev, 2);
2756 if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) {
2757 ret = PTR_ERR(edev);
2758 goto err;
2759 }
2760 }
2761
2762 if (!IS_ERR(edev)) {
2763 mdwc->extcon_eud = edev;
2764 mdwc->eud_event_nb.notifier_call = dwc3_msm_eud_notifier;
2765 ret = extcon_register_notifier(edev, EXTCON_USB,
2766 &mdwc->eud_event_nb);
2767 if (ret < 0) {
2768 dev_err(mdwc->dev, "failed to register notifier for EUD-USB\n");
2769 goto err1;
2770 }
2771 }
2772
Mayank Rana511f3b22016-08-02 12:00:11 -07002773 return 0;
Mayank Rana51958172017-02-28 14:49:21 -08002774err1:
2775 if (mdwc->extcon_id)
2776 extcon_unregister_notifier(mdwc->extcon_id, EXTCON_USB_HOST,
2777 &mdwc->id_nb);
Mayank Rana511f3b22016-08-02 12:00:11 -07002778err:
2779 if (mdwc->extcon_vbus)
2780 extcon_unregister_notifier(mdwc->extcon_vbus, EXTCON_USB,
2781 &mdwc->vbus_nb);
2782 return ret;
2783}
2784
Jack Phambbe27962017-03-23 18:42:26 -07002785#define SMMU_BASE 0x10000000 /* Device address range base */
2786#define SMMU_SIZE 0x40000000 /* Device address range size */
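/* The mapping below spans IOVAs [0x10000000, 0x50000000) for DWC3 DMA masters */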
2787
2788static int dwc3_msm_init_iommu(struct dwc3_msm *mdwc)
2789{
2790 struct device_node *node = mdwc->dev->of_node;
2791 int atomic_ctx = 1;
2792 int ret;
2793
2794 if (!of_property_read_bool(node, "iommus"))
2795 return 0;
2796
2797 mdwc->iommu_map = arm_iommu_create_mapping(&platform_bus_type,
2798 SMMU_BASE, SMMU_SIZE);
2799 if (IS_ERR_OR_NULL(mdwc->iommu_map)) {
2800 ret = PTR_ERR(mdwc->iommu_map) ?: -ENODEV;
2801 dev_err(mdwc->dev, "Failed to create IOMMU mapping (%d)\n",
2802 ret);
2803 return ret;
2804 }
2805 dev_dbg(mdwc->dev, "IOMMU mapping created: %pK\n", mdwc->iommu_map);
2806
2807 ret = iommu_domain_set_attr(mdwc->iommu_map->domain, DOMAIN_ATTR_ATOMIC,
2808 &atomic_ctx);
2809 if (ret) {
2810 dev_err(mdwc->dev, "IOMMU set atomic attribute failed (%d)\n",
2811 ret);
2812 arm_iommu_release_mapping(mdwc->iommu_map);
2813 mdwc->iommu_map = NULL;
2814 return ret;
2815 }
2816
2817 return 0;
2818}
2819
Mayank Rana511f3b22016-08-02 12:00:11 -07002820static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
2821 char *buf)
2822{
2823 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
2824
2825 if (mdwc->vbus_active)
2826 return snprintf(buf, PAGE_SIZE, "peripheral\n");
2827 if (mdwc->id_state == DWC3_ID_GROUND)
2828 return snprintf(buf, PAGE_SIZE, "host\n");
2829
2830 return snprintf(buf, PAGE_SIZE, "none\n");
2831}
2832
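/*
 * Writing "peripheral", "host" or anything else (treated as "none") to the
 * "mode" sysfs file forces the corresponding VBUS/ID state, e.g. (the sysfs
 * path is target-specific):
 *
 *     echo host > /sys/devices/platform/.../mode
 */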
2833static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
2834 const char *buf, size_t count)
2835{
2836 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
2837
2838 if (sysfs_streq(buf, "peripheral")) {
2839 mdwc->vbus_active = true;
2840 mdwc->id_state = DWC3_ID_FLOAT;
2841 } else if (sysfs_streq(buf, "host")) {
2842 mdwc->vbus_active = false;
2843 mdwc->id_state = DWC3_ID_GROUND;
2844 } else {
2845 mdwc->vbus_active = false;
2846 mdwc->id_state = DWC3_ID_FLOAT;
2847 }
2848
2849 dwc3_ext_event_notify(mdwc);
2850
2851 return count;
2852}
2853
2854static DEVICE_ATTR_RW(mode);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05302855static void msm_dwc3_perf_vote_work(struct work_struct *w);
Mayank Rana511f3b22016-08-02 12:00:11 -07002856
Vamsi Krishna Samavedam17f26db2017-01-31 17:21:23 -08002857static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
2858 char *buf)
2859{
2860 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
2861 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2862
2863 return snprintf(buf, PAGE_SIZE, "%s\n",
2864 usb_speed_string(dwc->max_hw_supp_speed));
2865}
2866
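/*
 * Writing "high" or "super" to the "speed" sysfs file caps the maximum
 * supported speed and schedules restart_usb_work so the new limit takes
 * effect on the next session.
 */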
2867static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
2868 const char *buf, size_t count)
2869{
2870 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
2871 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2872 enum usb_device_speed req_speed = USB_SPEED_UNKNOWN;
2873
2874 if (sysfs_streq(buf, "high"))
2875 req_speed = USB_SPEED_HIGH;
2876 else if (sysfs_streq(buf, "super"))
2877 req_speed = USB_SPEED_SUPER;
2878
2879 if (req_speed != USB_SPEED_UNKNOWN &&
2880 req_speed != dwc->max_hw_supp_speed) {
2881 dwc->maximum_speed = dwc->max_hw_supp_speed = req_speed;
2882 schedule_work(&mdwc->restart_usb_work);
2883 }
2884
2885 return count;
2886}
2887static DEVICE_ATTR_RW(speed);
2888
Mayank Rana511f3b22016-08-02 12:00:11 -07002889static int dwc3_msm_probe(struct platform_device *pdev)
2890{
2891 struct device_node *node = pdev->dev.of_node, *dwc3_node;
2892 struct device *dev = &pdev->dev;
Hemant Kumar8220a982017-01-19 18:11:34 -08002893 union power_supply_propval pval = {0};
Mayank Rana511f3b22016-08-02 12:00:11 -07002894 struct dwc3_msm *mdwc;
2895 struct dwc3 *dwc;
2896 struct resource *res;
2897 void __iomem *tcsr;
2898 bool host_mode;
2899 int ret = 0;
2900 int ext_hub_reset_gpio;
2901 u32 val;
2902
2903 mdwc = devm_kzalloc(&pdev->dev, sizeof(*mdwc), GFP_KERNEL);
2904 if (!mdwc)
2905 return -ENOMEM;
2906
2907 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
2908 dev_err(&pdev->dev, "setting DMA mask to 64 failed.\n");
2909 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
2910 dev_err(&pdev->dev, "setting DMA mask to 32 failed.\n");
2911 return -EOPNOTSUPP;
2912 }
2913 }
2914
2915 platform_set_drvdata(pdev, mdwc);
2916 mdwc->dev = &pdev->dev;
2917
2918 INIT_LIST_HEAD(&mdwc->req_complete_list);
2919 INIT_WORK(&mdwc->resume_work, dwc3_resume_work);
2920 INIT_WORK(&mdwc->restart_usb_work, dwc3_restart_usb_work);
2921 INIT_WORK(&mdwc->bus_vote_w, dwc3_msm_bus_vote_w);
Jack Pham4b8b4ae2016-08-09 11:36:34 -07002922 INIT_WORK(&mdwc->vbus_draw_work, dwc3_msm_vbus_draw_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07002923 INIT_DELAYED_WORK(&mdwc->sm_work, dwc3_otg_sm_work);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05302924 INIT_DELAYED_WORK(&mdwc->perf_vote_work, msm_dwc3_perf_vote_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07002925
2926 mdwc->dwc3_wq = alloc_ordered_workqueue("dwc3_wq", 0);
2927 if (!mdwc->dwc3_wq) {
2928 pr_err("%s: Unable to create workqueue dwc3_wq\n", __func__);
2929 return -ENOMEM;
2930 }
2931
2932 /* Get all clks and gdsc reference */
2933 ret = dwc3_msm_get_clk_gdsc(mdwc);
2934 if (ret) {
2935 dev_err(&pdev->dev, "error getting clock or gdsc.\n");
2936 return ret;
2937 }
2938
2939 mdwc->id_state = DWC3_ID_FLOAT;
2940 set_bit(ID, &mdwc->inputs);
2941
2942 mdwc->charging_disabled = of_property_read_bool(node,
2943 "qcom,charging-disabled");
2944
2945 ret = of_property_read_u32(node, "qcom,lpm-to-suspend-delay-ms",
2946 &mdwc->lpm_to_suspend_delay);
2947 if (ret) {
2948 dev_dbg(&pdev->dev, "setting lpm_to_suspend_delay to zero.\n");
2949 mdwc->lpm_to_suspend_delay = 0;
2950 }
2951
2952 /*
2953 * DWC3 has separate IRQ line for OTG events (ID/BSV) and for
2954 * DP and DM linestate transitions during low power mode.
2955 */
2956 mdwc->hs_phy_irq = platform_get_irq_byname(pdev, "hs_phy_irq");
2957 if (mdwc->hs_phy_irq < 0) {
2958 dev_err(&pdev->dev, "pget_irq for hs_phy_irq failed\n");
2959 ret = -EINVAL;
2960 goto err;
2961 } else {
2962 irq_set_status_flags(mdwc->hs_phy_irq, IRQ_NOAUTOEN);
2963 ret = devm_request_threaded_irq(&pdev->dev, mdwc->hs_phy_irq,
2964 msm_dwc3_pwr_irq,
2965 msm_dwc3_pwr_irq_thread,
2966 IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME
2967 | IRQF_ONESHOT, "hs_phy_irq", mdwc);
2968 if (ret) {
2969 dev_err(&pdev->dev, "irqreq hs_phy_irq failed: %d\n",
2970 ret);
2971 goto err;
2972 }
2973 }
2974
2975 mdwc->ss_phy_irq = platform_get_irq_byname(pdev, "ss_phy_irq");
2976 if (mdwc->ss_phy_irq < 0) {
2977 dev_dbg(&pdev->dev, "pget_irq for ss_phy_irq failed\n");
2978 } else {
2979 irq_set_status_flags(mdwc->ss_phy_irq, IRQ_NOAUTOEN);
2980 ret = devm_request_threaded_irq(&pdev->dev, mdwc->ss_phy_irq,
2981 msm_dwc3_pwr_irq,
2982 msm_dwc3_pwr_irq_thread,
2983 IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME
2984 | IRQF_ONESHOT, "ss_phy_irq", mdwc);
2985 if (ret) {
2986 dev_err(&pdev->dev, "irqreq ss_phy_irq failed: %d\n",
2987 ret);
2988 goto err;
2989 }
2990 }
2991
2992 mdwc->pwr_event_irq = platform_get_irq_byname(pdev, "pwr_event_irq");
2993 if (mdwc->pwr_event_irq < 0) {
2994 dev_err(&pdev->dev, "pget_irq for pwr_event_irq failed\n");
2995 ret = -EINVAL;
2996 goto err;
2997 } else {
2998 /* will be enabled in dwc3_msm_resume() */
2999 irq_set_status_flags(mdwc->pwr_event_irq, IRQ_NOAUTOEN);
3000 ret = devm_request_threaded_irq(&pdev->dev, mdwc->pwr_event_irq,
3001 msm_dwc3_pwr_irq,
3002 msm_dwc3_pwr_irq_thread,
3003 IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME,
3004 "msm_dwc3", mdwc);
3005 if (ret) {
3006 dev_err(&pdev->dev, "irqreq pwr_event_irq failed: %d\n",
3007 ret);
3008 goto err;
3009 }
3010 }
3011
3012 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcsr_base");
3013 if (!res) {
3014 dev_dbg(&pdev->dev, "missing TCSR memory resource\n");
3015 } else {
3016 tcsr = devm_ioremap_nocache(&pdev->dev, res->start,
3017 resource_size(res));
3018 if (IS_ERR_OR_NULL(tcsr)) {
3019 dev_dbg(&pdev->dev, "tcsr ioremap failed\n");
3020 } else {
3021 /* Enable USB3 on the primary USB port. */
3022 writel_relaxed(0x1, tcsr);
3023 /*
3024 * Ensure that TCSR write is completed before
3025 * USB registers initialization.
3026 */
3027 mb();
3028 }
3029 }
3030
3031 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core_base");
3032 if (!res) {
3033 dev_err(&pdev->dev, "missing memory base resource\n");
3034 ret = -ENODEV;
3035 goto err;
3036 }
3037
3038 mdwc->base = devm_ioremap_nocache(&pdev->dev, res->start,
3039 resource_size(res));
3040 if (!mdwc->base) {
3041 dev_err(&pdev->dev, "ioremap failed\n");
3042 ret = -ENODEV;
3043 goto err;
3044 }
3045
3046 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3047 "ahb2phy_base");
3048 if (res) {
3049 mdwc->ahb2phy_base = devm_ioremap_nocache(&pdev->dev,
3050 res->start, resource_size(res));
3051 if (IS_ERR_OR_NULL(mdwc->ahb2phy_base)) {
3052 dev_err(dev, "couldn't find ahb2phy_base addr.\n");
3053 mdwc->ahb2phy_base = NULL;
3054 } else {
3055 /*
3056 * On some targets cfg_ahb_clk depends upon usb gdsc
3057 * regulator. If cfg_ahb_clk is enabled without
3058 * turning on usb gdsc regulator clk is stuck off.
3059 */
3060 dwc3_msm_config_gdsc(mdwc, 1);
3061 clk_prepare_enable(mdwc->cfg_ahb_clk);
3062 /* Configure AHB2PHY for one wait state read/write*/
3063 val = readl_relaxed(mdwc->ahb2phy_base +
3064 PERIPH_SS_AHB2PHY_TOP_CFG);
3065 if (val != ONE_READ_WRITE_WAIT) {
3066 writel_relaxed(ONE_READ_WRITE_WAIT,
3067 mdwc->ahb2phy_base +
3068 PERIPH_SS_AHB2PHY_TOP_CFG);
3069 /* complete above write before using USB PHY */
3070 mb();
3071 }
3072 clk_disable_unprepare(mdwc->cfg_ahb_clk);
3073 dwc3_msm_config_gdsc(mdwc, 0);
3074 }
3075 }
3076
3077 if (of_get_property(pdev->dev.of_node, "qcom,usb-dbm", NULL)) {
3078 mdwc->dbm = usb_get_dbm_by_phandle(&pdev->dev, "qcom,usb-dbm");
3079 if (IS_ERR(mdwc->dbm)) {
3080 dev_err(&pdev->dev, "unable to get dbm device\n");
3081 ret = -EPROBE_DEFER;
3082 goto err;
3083 }
3084 /*
3085 * Add power event if the dbm indicates coming out of L1
3086 * by interrupt
3087 */
3088 if (dbm_l1_lpm_interrupt(mdwc->dbm)) {
3089 if (!mdwc->pwr_event_irq) {
3090 dev_err(&pdev->dev,
3091 "need pwr_event_irq exiting L1\n");
3092 ret = -EINVAL;
3093 goto err;
3094 }
3095 }
3096 }
3097
3098 ext_hub_reset_gpio = of_get_named_gpio(node,
3099 "qcom,ext-hub-reset-gpio", 0);
3100
3101 if (gpio_is_valid(ext_hub_reset_gpio)
3102 && (!devm_gpio_request(&pdev->dev, ext_hub_reset_gpio,
3103 "qcom,ext-hub-reset-gpio"))) {
3104 /* reset external hub */
3105 gpio_direction_output(ext_hub_reset_gpio, 1);
3106 /*
3107 * Hub reset should be asserted for minimum 5microsec
3108		 * Hub reset should be asserted for a minimum of 5 microseconds
3109 */
3110 usleep_range(5, 1000);
3111 gpio_direction_output(ext_hub_reset_gpio, 0);
3112 }
3113
3114 if (of_property_read_u32(node, "qcom,dwc-usb3-msm-tx-fifo-size",
3115 &mdwc->tx_fifo_size))
3116 dev_err(&pdev->dev,
3117 "unable to read platform data tx fifo size\n");
3118
3119 mdwc->disable_host_mode_pm = of_property_read_bool(node,
3120 "qcom,disable-host-mode-pm");
3121
3122 dwc3_set_notifier(&dwc3_msm_notify_event);
3123
Jack Phambbe27962017-03-23 18:42:26 -07003124 ret = dwc3_msm_init_iommu(mdwc);
3125 if (ret)
3126 goto err;
3127
Mayank Rana511f3b22016-08-02 12:00:11 -07003128 /* Assumes dwc3 is the first DT child of dwc3-msm */
3129 dwc3_node = of_get_next_available_child(node, NULL);
3130 if (!dwc3_node) {
3131 dev_err(&pdev->dev, "failed to find dwc3 child\n");
3132 ret = -ENODEV;
Jack Phambbe27962017-03-23 18:42:26 -07003133 goto uninit_iommu;
Mayank Rana511f3b22016-08-02 12:00:11 -07003134 }
3135
3136 ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
3137 if (ret) {
3138 dev_err(&pdev->dev,
3139				"failed to create dwc3 core\n");
3140 of_node_put(dwc3_node);
Jack Phambbe27962017-03-23 18:42:26 -07003141 goto uninit_iommu;
Mayank Rana511f3b22016-08-02 12:00:11 -07003142 }
3143
3144 mdwc->dwc3 = of_find_device_by_node(dwc3_node);
3145 of_node_put(dwc3_node);
3146 if (!mdwc->dwc3) {
3147 dev_err(&pdev->dev, "failed to get dwc3 platform device\n");
		ret = -ENODEV;
3148		goto put_dwc3;
3149 }
3150
3151 mdwc->hs_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
3152 "usb-phy", 0);
3153 if (IS_ERR(mdwc->hs_phy)) {
3154 dev_err(&pdev->dev, "unable to get hsphy device\n");
3155 ret = PTR_ERR(mdwc->hs_phy);
3156 goto put_dwc3;
3157 }
3158 mdwc->ss_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
3159 "usb-phy", 1);
3160 if (IS_ERR(mdwc->ss_phy)) {
3161 dev_err(&pdev->dev, "unable to get ssphy device\n");
3162 ret = PTR_ERR(mdwc->ss_phy);
3163 goto put_dwc3;
3164 }
3165
3166 mdwc->bus_scale_table = msm_bus_cl_get_pdata(pdev);
3167 if (mdwc->bus_scale_table) {
3168 mdwc->bus_perf_client =
3169 msm_bus_scale_register_client(mdwc->bus_scale_table);
3170 }
3171
3172 dwc = platform_get_drvdata(mdwc->dwc3);
3173 if (!dwc) {
3174 dev_err(&pdev->dev, "Failed to get dwc3 device\n");
		ret = -ENODEV;
3175		goto put_dwc3;
3176 }
3177
3178 mdwc->irq_to_affin = platform_get_irq(mdwc->dwc3, 0);
3179 mdwc->dwc3_cpu_notifier.notifier_call = dwc3_cpu_notifier_cb;
3180
3181 if (cpu_to_affin)
3182 register_cpu_notifier(&mdwc->dwc3_cpu_notifier);
3183
Mayank Ranaf4918d32016-12-15 13:35:55 -08003184 ret = of_property_read_u32(node, "qcom,num-gsi-evt-buffs",
3185 &mdwc->num_gsi_event_buffers);
3186
Mayank Rana511f3b22016-08-02 12:00:11 -07003187 /*
3188 * Clocks and regulators will not be turned on until the first time
3189 * runtime PM resume is called. This is to allow for booting up with
3190 * charger already connected so as not to disturb PHY line states.
3191 */
3192 mdwc->lpm_flags = MDWC3_POWER_COLLAPSE | MDWC3_SS_PHY_SUSPEND;
3193 atomic_set(&dwc->in_lpm, 1);
Mayank Rana511f3b22016-08-02 12:00:11 -07003194 pm_runtime_set_autosuspend_delay(mdwc->dev, 1000);
3195 pm_runtime_use_autosuspend(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003196 device_init_wakeup(mdwc->dev, 1);
3197
3198 if (of_property_read_bool(node, "qcom,disable-dev-mode-pm"))
3199 pm_runtime_get_noresume(mdwc->dev);
3200
3201 ret = dwc3_msm_extcon_register(mdwc);
3202 if (ret)
3203 goto put_dwc3;
3204
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303205 ret = of_property_read_u32(node, "qcom,pm-qos-latency",
3206 &mdwc->pm_qos_latency);
3207 if (ret) {
3208 dev_dbg(&pdev->dev, "setting pm-qos-latency to zero.\n");
3209 mdwc->pm_qos_latency = 0;
3210 }
3211
Hemant Kumar8220a982017-01-19 18:11:34 -08003212 mdwc->usb_psy = power_supply_get_by_name("usb");
3213 if (!mdwc->usb_psy) {
3214 dev_warn(mdwc->dev, "Could not get usb power_supply\n");
3215 pval.intval = -EINVAL;
3216 } else {
3217 power_supply_get_property(mdwc->usb_psy,
3218 POWER_SUPPLY_PROP_PRESENT, &pval);
3219 }
3220
Mayank Rana511f3b22016-08-02 12:00:11 -07003221 /* Update initial VBUS/ID state from extcon */
3222 if (mdwc->extcon_vbus && extcon_get_cable_state_(mdwc->extcon_vbus,
3223 EXTCON_USB))
3224 dwc3_msm_vbus_notifier(&mdwc->vbus_nb, true, mdwc->extcon_vbus);
Hemant Kumar8220a982017-01-19 18:11:34 -08003225 else if (mdwc->extcon_id && extcon_get_cable_state_(mdwc->extcon_id,
Mayank Rana511f3b22016-08-02 12:00:11 -07003226 EXTCON_USB_HOST))
3227 dwc3_msm_id_notifier(&mdwc->id_nb, true, mdwc->extcon_id);
Hemant Kumar8220a982017-01-19 18:11:34 -08003228 else if (!pval.intval) {
3229 /* USB cable is not connected */
3230 schedule_delayed_work(&mdwc->sm_work, 0);
3231 } else {
3232 if (pval.intval > 0)
3233 dev_info(mdwc->dev, "charger detection in progress\n");
3234 }
Mayank Rana511f3b22016-08-02 12:00:11 -07003235
3236 device_create_file(&pdev->dev, &dev_attr_mode);
Vamsi Krishna Samavedam17f26db2017-01-31 17:21:23 -08003237 device_create_file(&pdev->dev, &dev_attr_speed);
Mayank Rana511f3b22016-08-02 12:00:11 -07003238
Mayank Rana511f3b22016-08-02 12:00:11 -07003239 host_mode = usb_get_dr_mode(&mdwc->dwc3->dev) == USB_DR_MODE_HOST;
3240 if (!dwc->is_drd && host_mode) {
3241 dev_dbg(&pdev->dev, "DWC3 in host only mode\n");
3242 mdwc->id_state = DWC3_ID_GROUND;
3243 dwc3_ext_event_notify(mdwc);
3244 }
3245
3246 return 0;
3247
3248put_dwc3:
3249 platform_device_put(mdwc->dwc3);
3250 if (mdwc->bus_perf_client)
3251 msm_bus_scale_unregister_client(mdwc->bus_perf_client);
Jack Phambbe27962017-03-23 18:42:26 -07003252uninit_iommu:
3253 if (mdwc->iommu_map)
3254 arm_iommu_release_mapping(mdwc->iommu_map);
Mayank Rana511f3b22016-08-02 12:00:11 -07003255err:
3256 return ret;
3257}
3258
3259static int dwc3_msm_remove_children(struct device *dev, void *data)
3260{
3261 device_unregister(dev);
3262 return 0;
3263}
3264
3265static int dwc3_msm_remove(struct platform_device *pdev)
3266{
3267 struct dwc3_msm *mdwc = platform_get_drvdata(pdev);
Mayank Rana08e41922017-03-02 15:25:48 -08003268 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07003269 int ret_pm;
3270
3271	device_remove_file(&pdev->dev, &dev_attr_mode);
	device_remove_file(&pdev->dev, &dev_attr_speed);
3272
3273 if (cpu_to_affin)
3274 unregister_cpu_notifier(&mdwc->dwc3_cpu_notifier);
3275
3276 /*
3277 * In case of system suspend, pm_runtime_get_sync fails.
3278 * Hence turn ON the clocks manually.
3279 */
3280 ret_pm = pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003281 dbg_event(0xFF, "Remov gsyn", ret_pm);
Mayank Rana511f3b22016-08-02 12:00:11 -07003282 if (ret_pm < 0) {
3283 dev_err(mdwc->dev,
3284 "pm_runtime_get_sync failed with %d\n", ret_pm);
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05303285 if (mdwc->noc_aggr_clk)
3286 clk_prepare_enable(mdwc->noc_aggr_clk);
Mayank Rana511f3b22016-08-02 12:00:11 -07003287 clk_prepare_enable(mdwc->utmi_clk);
3288 clk_prepare_enable(mdwc->core_clk);
3289 clk_prepare_enable(mdwc->iface_clk);
3290 clk_prepare_enable(mdwc->sleep_clk);
3291 if (mdwc->bus_aggr_clk)
3292 clk_prepare_enable(mdwc->bus_aggr_clk);
3293 clk_prepare_enable(mdwc->xo_clk);
3294 }
3295
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303296 cancel_delayed_work_sync(&mdwc->perf_vote_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07003297 cancel_delayed_work_sync(&mdwc->sm_work);
3298
3299 if (mdwc->hs_phy)
3300 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3301 platform_device_put(mdwc->dwc3);
3302 device_for_each_child(&pdev->dev, NULL, dwc3_msm_remove_children);
3303
Mayank Rana08e41922017-03-02 15:25:48 -08003304 dbg_event(0xFF, "Remov put", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003305 pm_runtime_disable(mdwc->dev);
3306 pm_runtime_barrier(mdwc->dev);
3307 pm_runtime_put_sync(mdwc->dev);
3308 pm_runtime_set_suspended(mdwc->dev);
3309 device_wakeup_disable(mdwc->dev);
3310
3311 if (mdwc->bus_perf_client)
3312 msm_bus_scale_unregister_client(mdwc->bus_perf_client);
3313
3314 if (!IS_ERR_OR_NULL(mdwc->vbus_reg))
3315 regulator_disable(mdwc->vbus_reg);
3316
3317 disable_irq(mdwc->hs_phy_irq);
3318 if (mdwc->ss_phy_irq)
3319 disable_irq(mdwc->ss_phy_irq);
3320 disable_irq(mdwc->pwr_event_irq);
3321
3322 clk_disable_unprepare(mdwc->utmi_clk);
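	/* Scale core_clk down to 19.2 MHz before disabling it */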
3323 clk_set_rate(mdwc->core_clk, 19200000);
3324 clk_disable_unprepare(mdwc->core_clk);
3325 clk_disable_unprepare(mdwc->iface_clk);
3326 clk_disable_unprepare(mdwc->sleep_clk);
3327 clk_disable_unprepare(mdwc->xo_clk);
3328 clk_put(mdwc->xo_clk);
3329
3330 dwc3_msm_config_gdsc(mdwc, 0);
3331
Jack Phambbe27962017-03-23 18:42:26 -07003332 if (mdwc->iommu_map) {
3333 if (!atomic_read(&dwc->in_lpm))
3334 arm_iommu_detach_device(mdwc->dev);
3335 arm_iommu_release_mapping(mdwc->iommu_map);
3336 }
3337
Mayank Rana511f3b22016-08-02 12:00:11 -07003338 return 0;
3339}
3340
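/*
 * USB device add/remove notifier: scale core_clk based on root-hub port
 * speed and report the connected device's max power draw to the PMIC.
 */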
Jack Pham4d4e9342016-12-07 19:25:02 -08003341static int dwc3_msm_host_notifier(struct notifier_block *nb,
3342 unsigned long event, void *ptr)
3343{
3344 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, host_nb);
3345 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3346 struct usb_device *udev = ptr;
3347 union power_supply_propval pval;
3348 unsigned int max_power;
3349
3350 if (event != USB_DEVICE_ADD && event != USB_DEVICE_REMOVE)
3351 return NOTIFY_DONE;
3352
3353 if (!mdwc->usb_psy) {
3354 mdwc->usb_psy = power_supply_get_by_name("usb");
3355 if (!mdwc->usb_psy)
3356 return NOTIFY_DONE;
3357 }
3358
3359 /*
3360	 * For directly attached devices, the new udev is a child of the root
3361	 * hub, i.e. dwc -> xhci -> root_hub -> udev. The root hub's
3362	 * udev->parent is NULL, so walk the struct device hierarchy instead.
3363 */
3364 if (udev->parent && !udev->parent->parent &&
3365 udev->dev.parent->parent == &dwc->xhci->dev) {
3366 if (event == USB_DEVICE_ADD && udev->actconfig) {
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08003367 if (!dwc3_msm_is_ss_rhport_connected(mdwc)) {
3368 /*
3369 * Core clock rate can be reduced only if root
3370 * hub SS port is not enabled/connected.
3371 */
3372 clk_set_rate(mdwc->core_clk,
3373 mdwc->core_clk_rate_hs);
3374 dev_dbg(mdwc->dev,
3375 "set hs core clk rate %ld\n",
3376 mdwc->core_clk_rate_hs);
3377 mdwc->max_rh_port_speed = USB_SPEED_HIGH;
3378 } else {
3379 mdwc->max_rh_port_speed = USB_SPEED_SUPER;
3380 }
3381
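			/* bMaxPower is in 8 mA units for SuperSpeed devices, 2 mA units otherwise */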
Jack Pham4d4e9342016-12-07 19:25:02 -08003382 if (udev->speed >= USB_SPEED_SUPER)
3383 max_power = udev->actconfig->desc.bMaxPower * 8;
3384 else
3385 max_power = udev->actconfig->desc.bMaxPower * 2;
3386 dev_dbg(mdwc->dev, "%s configured bMaxPower:%d (mA)\n",
3387 dev_name(&udev->dev), max_power);
3388
3389 /* inform PMIC of max power so it can optimize boost */
3390 pval.intval = max_power * 1000;
3391 power_supply_set_property(mdwc->usb_psy,
3392 POWER_SUPPLY_PROP_BOOST_CURRENT, &pval);
3393 } else {
3394 pval.intval = 0;
3395 power_supply_set_property(mdwc->usb_psy,
3396 POWER_SUPPLY_PROP_BOOST_CURRENT, &pval);
Hemant Kumar6f504dc2017-02-07 14:13:58 -08003397
3398 /* set rate back to default core clk rate */
3399 clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
3400 dev_dbg(mdwc->dev, "set core clk rate %ld\n",
3401 mdwc->core_clk_rate);
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08003402 mdwc->max_rh_port_speed = USB_SPEED_UNKNOWN;
Jack Pham4d4e9342016-12-07 19:25:02 -08003403 }
3404 }
3405
3406 return NOTIFY_DONE;
3407}
3408
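/*
 * Apply or drop the PM QoS CPU/DMA latency vote when entering or
 * leaving performance mode.
 */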
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303409static void msm_dwc3_perf_vote_update(struct dwc3_msm *mdwc, bool perf_mode)
3410{
3411 static bool curr_perf_mode;
3412 int latency = mdwc->pm_qos_latency;
3413
3414 if ((curr_perf_mode == perf_mode) || !latency)
3415 return;
3416
3417 if (perf_mode)
3418 pm_qos_update_request(&mdwc->pm_qos_req_dma, latency);
3419 else
3420 pm_qos_update_request(&mdwc->pm_qos_req_dma,
3421 PM_QOS_DEFAULT_VALUE);
3422
3423 curr_perf_mode = perf_mode;
3424 pr_debug("%s: latency updated to: %d\n", __func__,
3425 perf_mode ? latency : PM_QOS_DEFAULT_VALUE);
3426}
3427
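/*
 * Periodic work: sample the controller interrupt count and vote for
 * performance mode while the rate exceeds PM_QOS_THRESHOLD per sample.
 */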
3428static void msm_dwc3_perf_vote_work(struct work_struct *w)
3429{
3430 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
3431 perf_vote_work.work);
3432 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3433 static unsigned long last_irq_cnt;
3434 bool in_perf_mode = false;
3435
3436 if (dwc->irq_cnt - last_irq_cnt >= PM_QOS_THRESHOLD)
3437 in_perf_mode = true;
3438
3439 pr_debug("%s: in_perf_mode:%u, interrupts in last sample:%lu\n",
3440 __func__, in_perf_mode, (dwc->irq_cnt - last_irq_cnt));
3441
3442 last_irq_cnt = dwc->irq_cnt;
3443 msm_dwc3_perf_vote_update(mdwc, in_perf_mode);
3444 schedule_delayed_work(&mdwc->perf_vote_work,
3445 msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
3446}
3447
Mayank Rana511f3b22016-08-02 12:00:11 -07003448#define VBUS_REG_CHECK_DELAY (msecs_to_jiffies(1000))
3449
3450/**
3451 * dwc3_otg_start_host - helper function for starting/stopping the host
3452 * controller driver.
3453 *
3454 * @mdwc: Pointer to the dwc3_msm structure.
3455 * @on: start / stop the host controller driver.
3456 *
3457 * Returns 0 on success, otherwise a negative errno.
3458 */
3459static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
3460{
3461 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3462 int ret = 0;
3463
3464 if (!dwc->xhci)
3465 return -EINVAL;
3466
3467 /*
3468 * The vbus_reg pointer could have multiple values
3469 * NULL: regulator_get() hasn't been called, or was previously deferred
3470 * IS_ERR: regulator could not be obtained, so skip using it
3471 * Valid pointer otherwise
3472 */
3473 if (!mdwc->vbus_reg) {
3474 mdwc->vbus_reg = devm_regulator_get_optional(mdwc->dev,
3475 "vbus_dwc3");
3476 if (IS_ERR(mdwc->vbus_reg) &&
3477 PTR_ERR(mdwc->vbus_reg) == -EPROBE_DEFER) {
3478 /* regulators may not be ready, so retry again later */
3479 mdwc->vbus_reg = NULL;
3480 return -EPROBE_DEFER;
3481 }
3482 }
3483
3484 if (on) {
3485 dev_dbg(mdwc->dev, "%s: turn on host\n", __func__);
3486
Mayank Rana511f3b22016-08-02 12:00:11 -07003487 mdwc->hs_phy->flags |= PHY_HOST_MODE;
Hemant Kumarde1df692016-04-26 19:36:48 -07003488 if (dwc->maximum_speed == USB_SPEED_SUPER)
3489 mdwc->ss_phy->flags |= PHY_HOST_MODE;
3490
3491 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003492 dbg_event(0xFF, "StrtHost gync",
3493 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003494 usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
3495 if (!IS_ERR(mdwc->vbus_reg))
3496 ret = regulator_enable(mdwc->vbus_reg);
3497 if (ret) {
3498 dev_err(mdwc->dev, "unable to enable vbus_reg\n");
3499 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3500 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
3501 pm_runtime_put_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003502 dbg_event(0xFF, "vregerr psync",
3503 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003504 return ret;
3505 }
3506
3507 dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
3508
Jack Pham4d4e9342016-12-07 19:25:02 -08003509 mdwc->host_nb.notifier_call = dwc3_msm_host_notifier;
3510 usb_register_notify(&mdwc->host_nb);
3511
Mayank Rana511f3b22016-08-02 12:00:11 -07003512 /*
3513		 * FIXME: If the micro-A cable is disconnected during system
3514		 * suspend, the xhci platform device is removed before runtime PM
3515		 * is enabled for it. disable_depth then exceeds one and runtime
3516		 * PM is not enabled on the next micro-A connect. Work around
3517		 * this by calling pm_runtime_init for the xhci device.
3518 */
3519 pm_runtime_init(&dwc->xhci->dev);
3520 ret = platform_device_add(dwc->xhci);
3521 if (ret) {
3522 dev_err(mdwc->dev,
3523 "%s: failed to add XHCI pdev ret=%d\n",
3524 __func__, ret);
3525 if (!IS_ERR(mdwc->vbus_reg))
3526 regulator_disable(mdwc->vbus_reg);
3527 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3528 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
3529 pm_runtime_put_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003530 dbg_event(0xFF, "pdeverr psync",
3531 atomic_read(&mdwc->dev->power.usage_count));
Jack Pham4d4e9342016-12-07 19:25:02 -08003532 usb_unregister_notify(&mdwc->host_nb);
Mayank Rana511f3b22016-08-02 12:00:11 -07003533 return ret;
3534 }
3535
3536 /*
3537		 * In some cases the USB PHY is observed not to enter suspend
3538		 * with host mode suspend functionality. Hence disable the
3539		 * xHCI's runtime PM here if disable_host_mode_pm is set.
3540 */
3541 if (mdwc->disable_host_mode_pm)
3542 pm_runtime_disable(&dwc->xhci->dev);
3543
3544 mdwc->in_host_mode = true;
3545 dwc3_usb3_phy_suspend(dwc, true);
3546
3547 /* xHCI should have incremented child count as necessary */
Mayank Rana08e41922017-03-02 15:25:48 -08003548 dbg_event(0xFF, "StrtHost psync",
3549 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003550 pm_runtime_mark_last_busy(mdwc->dev);
3551 pm_runtime_put_sync_autosuspend(mdwc->dev);
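		/* Affine the PM QoS request to the CPU servicing the controller IRQ */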
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303552#ifdef CONFIG_SMP
3553 mdwc->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
3554 mdwc->pm_qos_req_dma.irq = dwc->irq;
3555#endif
3556 pm_qos_add_request(&mdwc->pm_qos_req_dma,
3557 PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
3558 /* start in perf mode for better performance initially */
3559 msm_dwc3_perf_vote_update(mdwc, true);
3560 schedule_delayed_work(&mdwc->perf_vote_work,
3561 msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
Mayank Rana511f3b22016-08-02 12:00:11 -07003562 } else {
3563 dev_dbg(mdwc->dev, "%s: turn off host\n", __func__);
3564
3565 if (!IS_ERR(mdwc->vbus_reg))
3566 ret = regulator_disable(mdwc->vbus_reg);
3567 if (ret) {
3568 dev_err(mdwc->dev, "unable to disable vbus_reg\n");
3569 return ret;
3570 }
3571
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303572 cancel_delayed_work_sync(&mdwc->perf_vote_work);
3573 msm_dwc3_perf_vote_update(mdwc, false);
3574 pm_qos_remove_request(&mdwc->pm_qos_req_dma);
3575
Mayank Rana511f3b22016-08-02 12:00:11 -07003576 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003577 dbg_event(0xFF, "StopHost gsync",
3578 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003579 usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
3580 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3581 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
3582 platform_device_del(dwc->xhci);
Jack Pham4d4e9342016-12-07 19:25:02 -08003583 usb_unregister_notify(&mdwc->host_nb);
Mayank Rana511f3b22016-08-02 12:00:11 -07003584
3585 /*
3586 * Perform USB hardware RESET (both core reset and DBM reset)
3587 * when moving from host to peripheral. This is required for
3588 * peripheral mode to work.
3589 */
3590 dwc3_msm_block_reset(mdwc, true);
3591
3592 dwc3_usb3_phy_suspend(dwc, false);
3593 dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
3594
3595 mdwc->in_host_mode = false;
3596
3597 /* re-init core and OTG registers as block reset clears these */
3598 dwc3_post_host_reset_core_init(dwc);
3599 pm_runtime_mark_last_busy(mdwc->dev);
3600 pm_runtime_put_sync_autosuspend(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003601 dbg_event(0xFF, "StopHost psync",
3602 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003603 }
3604
3605 return 0;
3606}
3607
3608static void dwc3_override_vbus_status(struct dwc3_msm *mdwc, bool vbus_present)
3609{
3610 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3611
3612 /* Update OTG VBUS Valid from HSPHY to controller */
3613 dwc3_msm_write_readback(mdwc->base, HS_PHY_CTRL_REG,
3614 vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL :
3615 UTMI_OTG_VBUS_VALID,
3616 vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL : 0);
3617
3618 /* Update only if Super Speed is supported */
3619 if (dwc->maximum_speed == USB_SPEED_SUPER) {
3620 /* Update VBUS Valid from SSPHY to controller */
3621 dwc3_msm_write_readback(mdwc->base, SS_PHY_CTRL_REG,
3622 LANE0_PWR_PRESENT,
3623 vbus_present ? LANE0_PWR_PRESENT : 0);
3624 }
3625}
3626
3627/**
3628 * dwc3_otg_start_peripheral - bind/unbind the peripheral controller.
3629 *
3630 * @mdwc: Pointer to the dwc3_msm structure.
3631 * @on: Turn ON/OFF the gadget.
3632 *
3633 * Returns 0 on success, otherwise a negative errno.
3634 */
3635static int dwc3_otg_start_peripheral(struct dwc3_msm *mdwc, int on)
3636{
3637 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3638
3639 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003640 dbg_event(0xFF, "StrtGdgt gsync",
3641 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003642
3643 if (on) {
3644 dev_dbg(mdwc->dev, "%s: turn on gadget %s\n",
3645 __func__, dwc->gadget.name);
3646
3647 dwc3_override_vbus_status(mdwc, true);
3648 usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
3649 usb_phy_notify_connect(mdwc->ss_phy, USB_SPEED_SUPER);
3650
3651 /*
3652 * Core reset is not required during start peripheral. Only
3653 * DBM reset is required, hence perform only DBM reset here.
3654 */
3655 dwc3_msm_block_reset(mdwc, false);
3656
3657 dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
3658 usb_gadget_vbus_connect(&dwc->gadget);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303659#ifdef CONFIG_SMP
3660 mdwc->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
3661 mdwc->pm_qos_req_dma.irq = dwc->irq;
3662#endif
3663 pm_qos_add_request(&mdwc->pm_qos_req_dma,
3664 PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
3665 /* start in perf mode for better performance initially */
3666 msm_dwc3_perf_vote_update(mdwc, true);
3667 schedule_delayed_work(&mdwc->perf_vote_work,
3668 msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
Mayank Rana511f3b22016-08-02 12:00:11 -07003669 } else {
3670 dev_dbg(mdwc->dev, "%s: turn off gadget %s\n",
3671 __func__, dwc->gadget.name);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303672 cancel_delayed_work_sync(&mdwc->perf_vote_work);
3673 msm_dwc3_perf_vote_update(mdwc, false);
3674 pm_qos_remove_request(&mdwc->pm_qos_req_dma);
3675
Mayank Rana511f3b22016-08-02 12:00:11 -07003676 usb_gadget_vbus_disconnect(&dwc->gadget);
3677 usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
3678 usb_phy_notify_disconnect(mdwc->ss_phy, USB_SPEED_SUPER);
3679 dwc3_override_vbus_status(mdwc, false);
3680 dwc3_usb3_phy_suspend(dwc, false);
3681 }
3682
3683 pm_runtime_put_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003684 dbg_event(0xFF, "StopGdgt psync",
3685 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003686
3687 return 0;
3688}
3689
3690static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA)
3691{
Jack Pham8caff352016-08-19 16:33:55 -07003692 union power_supply_propval pval = {0};
Jack Phamd72bafe2016-08-09 11:07:22 -07003693 int ret;
Mayank Rana511f3b22016-08-02 12:00:11 -07003694
3695 if (mdwc->charging_disabled)
3696 return 0;
3697
3698 if (mdwc->max_power == mA)
3699 return 0;
3700
3701 if (!mdwc->usb_psy) {
3702 mdwc->usb_psy = power_supply_get_by_name("usb");
3703 if (!mdwc->usb_psy) {
3704 dev_warn(mdwc->dev, "Could not get usb power_supply\n");
3705 return -ENODEV;
3706 }
3707 }
3708
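	/* Only set an input current limit when the supply type is USB (SDP) */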
Jack Pham8caff352016-08-19 16:33:55 -07003709 power_supply_get_property(mdwc->usb_psy, POWER_SUPPLY_PROP_TYPE, &pval);
3710 if (pval.intval != POWER_SUPPLY_TYPE_USB)
3711 return 0;
3712
Mayank Rana511f3b22016-08-02 12:00:11 -07003713 dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA);
3714
Mayank Rana511f3b22016-08-02 12:00:11 -07003715 /* Set max current limit in uA */
Jack Pham8caff352016-08-19 16:33:55 -07003716 pval.intval = 1000 * mA;
Jack Phamd72bafe2016-08-09 11:07:22 -07003717 ret = power_supply_set_property(mdwc->usb_psy,
3718 POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
3719 if (ret) {
3720 dev_dbg(mdwc->dev, "power supply error when setting property\n");
3721 return ret;
3722 }
Mayank Rana511f3b22016-08-02 12:00:11 -07003723
3724 mdwc->max_power = mA;
3725 return 0;
Mayank Rana511f3b22016-08-02 12:00:11 -07003726}
3727
3728
3729/**
3730 * dwc3_otg_sm_work - workqueue function.
3731 *
3732 * @w: Pointer to the dwc3 otg workqueue
3733 *
3734 * NOTE: After any change in otg_state, we must reschedule the state machine.
3735 */
3736static void dwc3_otg_sm_work(struct work_struct *w)
3737{
3738 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, sm_work.work);
3739 struct dwc3 *dwc = NULL;
3740 bool work = 0;
3741 int ret = 0;
3742 unsigned long delay = 0;
3743 const char *state;
3744
3745 if (mdwc->dwc3)
3746 dwc = platform_get_drvdata(mdwc->dwc3);
3747
3748 if (!dwc) {
3749 dev_err(mdwc->dev, "dwc is NULL.\n");
3750 return;
3751 }
3752
3753 state = usb_otg_state_string(mdwc->otg_state);
3754 dev_dbg(mdwc->dev, "%s state\n", state);
Mayank Rana08e41922017-03-02 15:25:48 -08003755 dbg_event(0xFF, state, 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003756
3757 /* Check OTG state */
3758 switch (mdwc->otg_state) {
3759 case OTG_STATE_UNDEFINED:
Hemant Kumar8220a982017-01-19 18:11:34 -08003760 /* put controller and phy in suspend if no cable connected */
Mayank Rana511f3b22016-08-02 12:00:11 -07003761 if (test_bit(ID, &mdwc->inputs) &&
Hemant Kumar8220a982017-01-19 18:11:34 -08003762 !test_bit(B_SESS_VLD, &mdwc->inputs)) {
3763 dbg_event(0xFF, "undef_id_!bsv", 0);
3764 pm_runtime_set_active(mdwc->dev);
3765 pm_runtime_enable(mdwc->dev);
3766 pm_runtime_get_noresume(mdwc->dev);
3767 dwc3_msm_resume(mdwc);
3768 pm_runtime_put_sync(mdwc->dev);
3769 dbg_event(0xFF, "Undef NoUSB",
3770 atomic_read(&mdwc->dev->power.usage_count));
3771 mdwc->otg_state = OTG_STATE_B_IDLE;
Mayank Rana511f3b22016-08-02 12:00:11 -07003772 break;
Hemant Kumar8220a982017-01-19 18:11:34 -08003773 }
Mayank Rana511f3b22016-08-02 12:00:11 -07003774
Mayank Rana08e41922017-03-02 15:25:48 -08003775 dbg_event(0xFF, "Exit UNDEF", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003776 mdwc->otg_state = OTG_STATE_B_IDLE;
Hemant Kumar8220a982017-01-19 18:11:34 -08003777 pm_runtime_set_suspended(mdwc->dev);
3778 pm_runtime_enable(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003779 /* fall-through */
3780 case OTG_STATE_B_IDLE:
3781 if (!test_bit(ID, &mdwc->inputs)) {
3782 dev_dbg(mdwc->dev, "!id\n");
3783 mdwc->otg_state = OTG_STATE_A_IDLE;
3784 work = 1;
3785 } else if (test_bit(B_SESS_VLD, &mdwc->inputs)) {
3786 dev_dbg(mdwc->dev, "b_sess_vld\n");
3787 /*
3788 * Increment pm usage count upon cable connect. Count
3789 * is decremented in OTG_STATE_B_PERIPHERAL state on
3790 * cable disconnect or in bus suspend.
3791 */
3792 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003793 dbg_event(0xFF, "BIDLE gsync",
3794 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003795 dwc3_otg_start_peripheral(mdwc, 1);
3796 mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
3797 work = 1;
3798 } else {
3799 dwc3_msm_gadget_vbus_draw(mdwc, 0);
3800 dev_dbg(mdwc->dev, "Cable disconnected\n");
3801 }
3802 break;
3803
3804 case OTG_STATE_B_PERIPHERAL:
3805 if (!test_bit(B_SESS_VLD, &mdwc->inputs) ||
3806 !test_bit(ID, &mdwc->inputs)) {
3807 dev_dbg(mdwc->dev, "!id || !bsv\n");
3808 mdwc->otg_state = OTG_STATE_B_IDLE;
3809 dwc3_otg_start_peripheral(mdwc, 0);
3810 /*
3811 * Decrement pm usage count upon cable disconnect
3812 * which was incremented upon cable connect in
3813 * OTG_STATE_B_IDLE state
3814 */
3815 pm_runtime_put_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003816 dbg_event(0xFF, "!BSV psync",
3817 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003818 work = 1;
3819 } else if (test_bit(B_SUSPEND, &mdwc->inputs) &&
3820 test_bit(B_SESS_VLD, &mdwc->inputs)) {
3821 dev_dbg(mdwc->dev, "BPER bsv && susp\n");
3822 mdwc->otg_state = OTG_STATE_B_SUSPEND;
3823 /*
3824 * Decrement pm usage count upon bus suspend.
3825 * Count was incremented either upon cable
3826 * connect in OTG_STATE_B_IDLE or host
3827 * initiated resume after bus suspend in
3828 * OTG_STATE_B_SUSPEND state
3829 */
3830 pm_runtime_mark_last_busy(mdwc->dev);
3831 pm_runtime_put_autosuspend(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003832 dbg_event(0xFF, "SUSP put",
3833 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003834 }
3835 break;
3836
3837 case OTG_STATE_B_SUSPEND:
3838 if (!test_bit(B_SESS_VLD, &mdwc->inputs)) {
3839 dev_dbg(mdwc->dev, "BSUSP: !bsv\n");
3840 mdwc->otg_state = OTG_STATE_B_IDLE;
3841 dwc3_otg_start_peripheral(mdwc, 0);
3842 } else if (!test_bit(B_SUSPEND, &mdwc->inputs)) {
3843 dev_dbg(mdwc->dev, "BSUSP !susp\n");
3844 mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
3845 /*
3846 * Increment pm usage count upon host
3847 * initiated resume. Count was decremented
3848 * upon bus suspend in
3849 * OTG_STATE_B_PERIPHERAL state.
3850 */
3851 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003852 dbg_event(0xFF, "!SUSP gsync",
3853 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003854 }
3855 break;
3856
3857 case OTG_STATE_A_IDLE:
3858		/* Switch to A-Device */
3859 if (test_bit(ID, &mdwc->inputs)) {
3860 dev_dbg(mdwc->dev, "id\n");
3861 mdwc->otg_state = OTG_STATE_B_IDLE;
3862 mdwc->vbus_retry_count = 0;
3863 work = 1;
3864 } else {
3865 mdwc->otg_state = OTG_STATE_A_HOST;
3866 ret = dwc3_otg_start_host(mdwc, 1);
3867 if ((ret == -EPROBE_DEFER) &&
3868 mdwc->vbus_retry_count < 3) {
3869 /*
3870				 * Getting the regulator failed because the
3871				 * regulator driver is not up yet; retry in 1 sec.
3872 */
3873 mdwc->otg_state = OTG_STATE_A_IDLE;
3874 dev_dbg(mdwc->dev, "Unable to get vbus regulator. Retrying...\n");
3875 delay = VBUS_REG_CHECK_DELAY;
3876 work = 1;
3877 mdwc->vbus_retry_count++;
3878 } else if (ret) {
3879 dev_err(mdwc->dev, "unable to start host\n");
3880 mdwc->otg_state = OTG_STATE_A_IDLE;
3881 goto ret;
3882 }
3883 }
3884 break;
3885
3886 case OTG_STATE_A_HOST:
3887 if (test_bit(ID, &mdwc->inputs)) {
3888 dev_dbg(mdwc->dev, "id\n");
3889 dwc3_otg_start_host(mdwc, 0);
3890 mdwc->otg_state = OTG_STATE_B_IDLE;
3891 mdwc->vbus_retry_count = 0;
3892 work = 1;
3893 } else {
3894 dev_dbg(mdwc->dev, "still in a_host state. Resuming root hub.\n");
Mayank Rana08e41922017-03-02 15:25:48 -08003895 dbg_event(0xFF, "XHCIResume", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003896 if (dwc)
3897 pm_runtime_resume(&dwc->xhci->dev);
3898 }
3899 break;
3900
3901 default:
3902 dev_err(mdwc->dev, "%s: invalid otg-state\n", __func__);
3903
3904 }
3905
3906 if (work)
3907 schedule_delayed_work(&mdwc->sm_work, delay);
3908
3909ret:
3910 return;
3911}
3912
3913#ifdef CONFIG_PM_SLEEP
3914static int dwc3_msm_pm_suspend(struct device *dev)
3915{
3916 int ret = 0;
3917 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3918 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3919
3920 dev_dbg(dev, "dwc3-msm PM suspend\n");
Mayank Rana08e41922017-03-02 15:25:48 -08003921 dbg_event(0xFF, "PM Sus", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003922
3923 flush_workqueue(mdwc->dwc3_wq);
3924 if (!atomic_read(&dwc->in_lpm)) {
3925 dev_err(mdwc->dev, "Abort PM suspend!! (USB is outside LPM)\n");
3926 return -EBUSY;
3927 }
3928
3929 ret = dwc3_msm_suspend(mdwc);
3930 if (!ret)
3931 atomic_set(&mdwc->pm_suspended, 1);
3932
3933 return ret;
3934}
3935
3936static int dwc3_msm_pm_resume(struct device *dev)
3937{
3938 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003939 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07003940
3941 dev_dbg(dev, "dwc3-msm PM resume\n");
Mayank Rana08e41922017-03-02 15:25:48 -08003942 dbg_event(0xFF, "PM Res", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003943
Mayank Rana511f3b22016-08-02 12:00:11 -07003944 /* flush to avoid race in read/write of pm_suspended */
3945 flush_workqueue(mdwc->dwc3_wq);
3946 atomic_set(&mdwc->pm_suspended, 0);
3947
3948 /* kick in otg state machine */
3949 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
3950
3951 return 0;
3952}
3953#endif
3954
3955#ifdef CONFIG_PM
3956static int dwc3_msm_runtime_idle(struct device *dev)
3957{
Mayank Rana08e41922017-03-02 15:25:48 -08003958 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3959 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3960
Mayank Rana511f3b22016-08-02 12:00:11 -07003961 dev_dbg(dev, "DWC3-msm runtime idle\n");
Mayank Rana08e41922017-03-02 15:25:48 -08003962 dbg_event(0xFF, "RT Idle", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003963
3964 return 0;
3965}
3966
3967static int dwc3_msm_runtime_suspend(struct device *dev)
3968{
3969 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003970 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07003971
3972 dev_dbg(dev, "DWC3-msm runtime suspend\n");
Mayank Rana08e41922017-03-02 15:25:48 -08003973 dbg_event(0xFF, "RT Sus", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003974
3975 return dwc3_msm_suspend(mdwc);
3976}
3977
3978static int dwc3_msm_runtime_resume(struct device *dev)
3979{
3980 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003981 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07003982
3983 dev_dbg(dev, "DWC3-msm runtime resume\n");
Mayank Rana08e41922017-03-02 15:25:48 -08003984 dbg_event(0xFF, "RT Res", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003985
3986 return dwc3_msm_resume(mdwc);
3987}
3988#endif
3989
3990static const struct dev_pm_ops dwc3_msm_dev_pm_ops = {
3991 SET_SYSTEM_SLEEP_PM_OPS(dwc3_msm_pm_suspend, dwc3_msm_pm_resume)
3992 SET_RUNTIME_PM_OPS(dwc3_msm_runtime_suspend, dwc3_msm_runtime_resume,
3993 dwc3_msm_runtime_idle)
3994};
3995
3996static const struct of_device_id of_dwc3_match[] = {
3997 {
3998 .compatible = "qcom,dwc-usb3-msm",
3999 },
4000 { },
4001};
4002MODULE_DEVICE_TABLE(of, of_dwc3_match);
4003
4004static struct platform_driver dwc3_msm_driver = {
4005 .probe = dwc3_msm_probe,
4006 .remove = dwc3_msm_remove,
4007 .driver = {
4008 .name = "msm-dwc3",
4009 .pm = &dwc3_msm_dev_pm_ops,
4010		.of_match_table = of_dwc3_match,
4011 },
4012};
4013
4014MODULE_LICENSE("GPL v2");
4015MODULE_DESCRIPTION("DesignWare USB3 MSM Glue Layer");
4016
4017static int dwc3_msm_init(void)
4018{
4019 return platform_driver_register(&dwc3_msm_driver);
4020}
4021module_init(dwc3_msm_init);
4022
4023static void __exit dwc3_msm_exit(void)
4024{
4025 platform_driver_unregister(&dwc3_msm_driver);
4026}
4027module_exit(dwc3_msm_exit);