blob: 47f657143bf9b7143d5a3381a6519963c7c7fef1 [file] [log] [blame]
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08001/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Mayank Rana511f3b22016-08-02 12:00:11 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/slab.h>
17#include <linux/cpu.h>
18#include <linux/platform_device.h>
19#include <linux/dma-mapping.h>
20#include <linux/dmapool.h>
21#include <linux/pm_runtime.h>
22#include <linux/ratelimit.h>
23#include <linux/interrupt.h>
Jack Phambbe27962017-03-23 18:42:26 -070024#include <asm/dma-iommu.h>
25#include <linux/iommu.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070026#include <linux/ioport.h>
27#include <linux/clk.h>
28#include <linux/io.h>
29#include <linux/module.h>
30#include <linux/types.h>
31#include <linux/delay.h>
32#include <linux/of.h>
33#include <linux/of_platform.h>
34#include <linux/of_gpio.h>
35#include <linux/list.h>
36#include <linux/uaccess.h>
37#include <linux/usb/ch9.h>
38#include <linux/usb/gadget.h>
39#include <linux/usb/of.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070040#include <linux/regulator/consumer.h>
41#include <linux/pm_wakeup.h>
42#include <linux/power_supply.h>
43#include <linux/cdev.h>
44#include <linux/completion.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070045#include <linux/msm-bus.h>
46#include <linux/irq.h>
47#include <linux/extcon.h>
Amit Nischal4d278212016-06-06 17:54:34 +053048#include <linux/reset.h>
Hemant Kumar633dc332016-08-10 13:41:05 -070049#include <linux/clk/qcom.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070050
51#include "power.h"
52#include "core.h"
53#include "gadget.h"
54#include "dbm.h"
55#include "debug.h"
56#include "xhci.h"
57
58/* time out to wait for USB cable status notification (in ms)*/
59#define SM_INIT_TIMEOUT 30000
60
61/* AHB2PHY register offsets */
62#define PERIPH_SS_AHB2PHY_TOP_CFG 0x10
63
64/* AHB2PHY read/write waite value */
65#define ONE_READ_WRITE_WAIT 0x11
66
67/* cpu to fix usb interrupt */
68static int cpu_to_affin;
69module_param(cpu_to_affin, int, S_IRUGO|S_IWUSR);
70MODULE_PARM_DESC(cpu_to_affin, "affin usb irq to this cpu");
71
72/* XHCI registers */
73#define USB3_HCSPARAMS1 (0x4)
74#define USB3_PORTSC (0x420)
75
76/**
77 * USB QSCRATCH Hardware registers
78 *
79 */
80#define QSCRATCH_REG_OFFSET (0x000F8800)
81#define QSCRATCH_GENERAL_CFG (QSCRATCH_REG_OFFSET + 0x08)
82#define CGCTL_REG (QSCRATCH_REG_OFFSET + 0x28)
83#define PWR_EVNT_IRQ_STAT_REG (QSCRATCH_REG_OFFSET + 0x58)
84#define PWR_EVNT_IRQ_MASK_REG (QSCRATCH_REG_OFFSET + 0x5C)
85
86#define PWR_EVNT_POWERDOWN_IN_P3_MASK BIT(2)
87#define PWR_EVNT_POWERDOWN_OUT_P3_MASK BIT(3)
88#define PWR_EVNT_LPM_IN_L2_MASK BIT(4)
89#define PWR_EVNT_LPM_OUT_L2_MASK BIT(5)
90#define PWR_EVNT_LPM_OUT_L1_MASK BIT(13)
91
92/* QSCRATCH_GENERAL_CFG register bit offset */
93#define PIPE_UTMI_CLK_SEL BIT(0)
94#define PIPE3_PHYSTATUS_SW BIT(3)
95#define PIPE_UTMI_CLK_DIS BIT(8)
96
97#define HS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x10)
98#define UTMI_OTG_VBUS_VALID BIT(20)
99#define SW_SESSVLD_SEL BIT(28)
100
101#define SS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x30)
102#define LANE0_PWR_PRESENT BIT(24)
103
104/* GSI related registers */
105#define GSI_TRB_ADDR_BIT_53_MASK (1 << 21)
106#define GSI_TRB_ADDR_BIT_55_MASK (1 << 23)
107
108#define GSI_GENERAL_CFG_REG (QSCRATCH_REG_OFFSET + 0xFC)
109#define GSI_RESTART_DBL_PNTR_MASK BIT(20)
110#define GSI_CLK_EN_MASK BIT(12)
111#define BLOCK_GSI_WR_GO_MASK BIT(1)
112#define GSI_EN_MASK BIT(0)
113
114#define GSI_DBL_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x110) + (n*4))
115#define GSI_DBL_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x120) + (n*4))
116#define GSI_RING_BASE_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x130) + (n*4))
117#define GSI_RING_BASE_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x140) + (n*4))
118
119#define GSI_IF_STS (QSCRATCH_REG_OFFSET + 0x1A4)
120#define GSI_WR_CTRL_STATE_MASK BIT(15)
121
Mayank Ranaf4918d32016-12-15 13:35:55 -0800122#define DWC3_GEVNTCOUNT_EVNTINTRPTMASK (1 << 31)
123#define DWC3_GEVNTADRHI_EVNTADRHI_GSI_EN(n) (n << 22)
124#define DWC3_GEVNTADRHI_EVNTADRHI_GSI_IDX(n) (n << 16)
125#define DWC3_GEVENT_TYPE_GSI 0x3
126
/*
 * Bookkeeping node that stashes a request's original ->complete callback
 * while it is overridden with dwc3_msm_req_complete_func() for DBM-mode
 * transfers.  Entries live on mdwc->req_complete_list.
 */
struct dwc3_msm_req_complete {
	struct list_head list_item;	/* link into mdwc->req_complete_list */
	struct usb_request *req;	/* request being tracked */
	void (*orig_complete)(struct usb_ep *ep,
			      struct usb_request *req); /* saved callback */
};
133
/* State of the USB ID pin: grounded selects host role, floating peripheral. */
enum dwc3_id_state {
	DWC3_ID_GROUND = 0,
	DWC3_ID_FLOAT,
};
138
/* Type-C cable plug orientation (which CC line is active). */
enum plug_orientation {
	ORIENTATION_NONE,
	ORIENTATION_CC1,
	ORIENTATION_CC2,
};
145
146/* Input bits to state machine (mdwc->inputs) */
147
148#define ID 0
149#define B_SESS_VLD 1
150#define B_SUSPEND 2
151
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +0530152#define PM_QOS_SAMPLE_SEC 2
153#define PM_QOS_THRESHOLD 400
154
/*
 * Per-controller state for the MSM dwc3 glue driver: register mappings,
 * clocks/regulators, PHY handles, role-switch state machine inputs,
 * extcon notifiers and GSI/DBM bookkeeping.
 */
struct dwc3_msm {
	struct device *dev;
	void __iomem *base;		/* DWC3 core + QSCRATCH registers */
	void __iomem *ahb2phy_base;	/* AHB2PHY bridge config registers */
	struct platform_device *dwc3;	/* child dwc3 core device */
	struct dma_iommu_mapping *iommu_map;
	/* saved ep ops so DBM-mode overrides can fall back to the originals */
	const struct usb_ep_ops *original_ep_ops[DWC3_ENDPOINTS_NUM];
	struct list_head req_complete_list; /* dwc3_msm_req_complete entries */

	/* clocks, reset and power rails */
	struct clk *xo_clk;
	struct clk *core_clk;
	long core_clk_rate;
	long core_clk_rate_hs;		/* reduced rate for HS-only operation */
	struct clk *iface_clk;
	struct clk *sleep_clk;
	struct clk *utmi_clk;
	unsigned int utmi_clk_rate;
	struct clk *utmi_clk_src;
	struct clk *bus_aggr_clk;
	struct clk *noc_aggr_clk;
	struct clk *cfg_ahb_clk;
	struct reset_control *core_reset;
	struct regulator *dwc3_gdsc;

	struct usb_phy *hs_phy, *ss_phy;

	struct dbm *dbm;		/* Device Bus Manager (BAM) handle */

	/* VBUS regulator for host mode */
	struct regulator *vbus_reg;
	int vbus_retry_count;
	bool resume_pending;
	atomic_t pm_suspended;
	int hs_phy_irq;
	int ss_phy_irq;
	struct work_struct resume_work;
	struct work_struct restart_usb_work;
	bool in_restart;
	struct workqueue_struct *dwc3_wq;
	struct delayed_work sm_work;	/* OTG/role state machine */
	unsigned long inputs;		/* ID/B_SESS_VLD/B_SUSPEND bits */
	unsigned int max_power;
	bool charging_disabled;
	enum usb_otg_state otg_state;

	/* bus bandwidth voting */
	struct work_struct bus_vote_w;
	unsigned int bus_vote;
	u32 bus_perf_client;
	struct msm_bus_scale_pdata *bus_scale_table;
	struct power_supply *usb_psy;
	struct work_struct vbus_draw_work;
	bool in_host_mode;
	enum usb_device_speed max_rh_port_speed;
	unsigned int tx_fifo_size;
	bool vbus_active;
	bool suspend;
	bool disable_host_mode_pm;
	enum dwc3_id_state id_state;
	unsigned long lpm_flags;	/* low-power-mode status bits below */
#define MDWC3_SS_PHY_SUSPEND		BIT(0)
#define MDWC3_ASYNC_IRQ_WAKE_CAPABILITY	BIT(1)
#define MDWC3_POWER_COLLAPSE		BIT(2)

	unsigned int irq_to_affin;	/* CPU the USB IRQ is pinned to */
	struct notifier_block dwc3_cpu_notifier;
	struct notifier_block usbdev_nb;
	bool hc_died;

	/* extcon cable detection */
	struct extcon_dev *extcon_vbus;
	struct extcon_dev *extcon_id;
	struct extcon_dev *extcon_eud;
	struct notifier_block vbus_nb;
	struct notifier_block id_nb;
	struct notifier_block eud_event_nb;

	struct notifier_block host_nb;

	int pwr_event_irq;
	atomic_t in_p3;			/* SS PHY in P3 power state */
	unsigned int lpm_to_suspend_delay;
	bool init;
	enum plug_orientation typec_orientation;

	/* GSI (IPA offload) event buffer bookkeeping */
	u32 num_gsi_event_buffers;
	struct dwc3_event_buffer **gsi_ev_buff;

	/* PM QoS voting based on periodic bus-activity sampling */
	int pm_qos_latency;
	struct pm_qos_request pm_qos_req_dma;
	struct delayed_work perf_vote_work;
};
241
242#define USB_HSPHY_3P3_VOL_MIN 3050000 /* uV */
243#define USB_HSPHY_3P3_VOL_MAX 3300000 /* uV */
244#define USB_HSPHY_3P3_HPM_LOAD 16000 /* uA */
245
246#define USB_HSPHY_1P8_VOL_MIN 1800000 /* uV */
247#define USB_HSPHY_1P8_VOL_MAX 1800000 /* uV */
248#define USB_HSPHY_1P8_HPM_LOAD 19000 /* uA */
249
250#define USB_SSPHY_1P8_VOL_MIN 1800000 /* uV */
251#define USB_SSPHY_1P8_VOL_MAX 1800000 /* uV */
252#define USB_SSPHY_1P8_HPM_LOAD 23000 /* uA */
253
254#define DSTS_CONNECTSPD_SS 0x4
255
256
257static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc);
258static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA);
Mayank Ranaf4918d32016-12-15 13:35:55 -0800259static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event);
Mayank Rana511f3b22016-08-02 12:00:11 -0700260/**
261 *
262 * Read register with debug info.
263 *
264 * @base - DWC3 base virtual address.
265 * @offset - register offset.
266 *
267 * @return u32
268 */
269static inline u32 dwc3_msm_read_reg(void *base, u32 offset)
270{
271 u32 val = ioread32(base + offset);
272 return val;
273}
274
275/**
276 * Read register masked field with debug info.
277 *
278 * @base - DWC3 base virtual address.
279 * @offset - register offset.
280 * @mask - register bitmask.
281 *
282 * @return u32
283 */
284static inline u32 dwc3_msm_read_reg_field(void *base,
285 u32 offset,
286 const u32 mask)
287{
288 u32 shift = find_first_bit((void *)&mask, 32);
289 u32 val = ioread32(base + offset);
290
291 val &= mask; /* clear other bits */
292 val >>= shift;
293 return val;
294}
295
296/**
297 *
298 * Write register with debug info.
299 *
300 * @base - DWC3 base virtual address.
301 * @offset - register offset.
302 * @val - value to write.
303 *
304 */
305static inline void dwc3_msm_write_reg(void *base, u32 offset, u32 val)
306{
307 iowrite32(val, base + offset);
308}
309
310/**
311 * Write register masked field with debug info.
312 *
313 * @base - DWC3 base virtual address.
314 * @offset - register offset.
315 * @mask - register bitmask.
316 * @val - value to write.
317 *
318 */
319static inline void dwc3_msm_write_reg_field(void *base, u32 offset,
320 const u32 mask, u32 val)
321{
322 u32 shift = find_first_bit((void *)&mask, 32);
323 u32 tmp = ioread32(base + offset);
324
325 tmp &= ~mask; /* clear written bits */
326 val = tmp | (val << shift);
327 iowrite32(val, base + offset);
328}
329
330/**
331 * Write register and read back masked value to confirm it is written
332 *
333 * @base - DWC3 base virtual address.
334 * @offset - register offset.
335 * @mask - register bitmask specifying what should be updated
336 * @val - value to write.
337 *
338 */
339static inline void dwc3_msm_write_readback(void *base, u32 offset,
340 const u32 mask, u32 val)
341{
342 u32 write_val, tmp = ioread32(base + offset);
343
344 tmp &= ~mask; /* retain other bits */
345 write_val = tmp | val;
346
347 iowrite32(write_val, base + offset);
348
349 /* Read back to see if val was written */
350 tmp = ioread32(base + offset);
351 tmp &= mask; /* clear other bits */
352
353 if (tmp != val)
354 pr_err("%s: write: %x to QSCRATCH: %x FAILED\n",
355 __func__, val, offset);
356}
357
Hemant Kumar8e4c2f22017-01-24 18:13:07 -0800358static bool dwc3_msm_is_ss_rhport_connected(struct dwc3_msm *mdwc)
359{
360 int i, num_ports;
361 u32 reg;
362
363 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
364 num_ports = HCS_MAX_PORTS(reg);
365
366 for (i = 0; i < num_ports; i++) {
367 reg = dwc3_msm_read_reg(mdwc->base, USB3_PORTSC + i*0x10);
368 if ((reg & PORT_CONNECT) && DEV_SUPERSPEED(reg))
369 return true;
370 }
371
372 return false;
373}
374
Mayank Rana511f3b22016-08-02 12:00:11 -0700375static bool dwc3_msm_is_host_superspeed(struct dwc3_msm *mdwc)
376{
377 int i, num_ports;
378 u32 reg;
379
380 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
381 num_ports = HCS_MAX_PORTS(reg);
382
383 for (i = 0; i < num_ports; i++) {
384 reg = dwc3_msm_read_reg(mdwc->base, USB3_PORTSC + i*0x10);
385 if ((reg & PORT_PE) && DEV_SUPERSPEED(reg))
386 return true;
387 }
388
389 return false;
390}
391
392static inline bool dwc3_msm_is_dev_superspeed(struct dwc3_msm *mdwc)
393{
394 u8 speed;
395
396 speed = dwc3_msm_read_reg(mdwc->base, DWC3_DSTS) & DWC3_DSTS_CONNECTSPD;
397 return !!(speed & DSTS_CONNECTSPD_SS);
398}
399
400static inline bool dwc3_msm_is_superspeed(struct dwc3_msm *mdwc)
401{
402 if (mdwc->in_host_mode)
403 return dwc3_msm_is_host_superspeed(mdwc);
404
405 return dwc3_msm_is_dev_superspeed(mdwc);
406}
407
408#if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
409/**
410 * Configure the DBM with the BAM's data fifo.
411 * This function is called by the USB BAM Driver
412 * upon initialization.
413 *
414 * @ep - pointer to usb endpoint.
415 * @addr - address of data fifo.
416 * @size - size of data fifo.
417 *
418 */
419int msm_data_fifo_config(struct usb_ep *ep, phys_addr_t addr,
420 u32 size, u8 dst_pipe_idx)
421{
422 struct dwc3_ep *dep = to_dwc3_ep(ep);
423 struct dwc3 *dwc = dep->dwc;
424 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
425
426 dev_dbg(mdwc->dev, "%s\n", __func__);
427
428 return dbm_data_fifo_config(mdwc->dbm, dep->number, addr, size,
429 dst_pipe_idx);
430}
431
432
433/**
434* Cleanups for msm endpoint on request complete.
435*
436* Also call original request complete.
437*
438* @usb_ep - pointer to usb_ep instance.
439* @request - pointer to usb_request instance.
440*
441* @return int - 0 on success, negative on error.
442*/
443static void dwc3_msm_req_complete_func(struct usb_ep *ep,
444 struct usb_request *request)
445{
446 struct dwc3_ep *dep = to_dwc3_ep(ep);
447 struct dwc3 *dwc = dep->dwc;
448 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
449 struct dwc3_msm_req_complete *req_complete = NULL;
450
451 /* Find original request complete function and remove it from list */
452 list_for_each_entry(req_complete, &mdwc->req_complete_list, list_item) {
453 if (req_complete->req == request)
454 break;
455 }
456 if (!req_complete || req_complete->req != request) {
457 dev_err(dep->dwc->dev, "%s: could not find the request\n",
458 __func__);
459 return;
460 }
461 list_del(&req_complete->list_item);
462
463 /*
464 * Release another one TRB to the pool since DBM queue took 2 TRBs
465 * (normal and link), and the dwc3/gadget.c :: dwc3_gadget_giveback
466 * released only one.
467 */
Mayank Rana83ad5822016-08-09 14:17:22 -0700468 dep->trb_dequeue++;
Mayank Rana511f3b22016-08-02 12:00:11 -0700469
470 /* Unconfigure dbm ep */
471 dbm_ep_unconfig(mdwc->dbm, dep->number);
472
473 /*
474 * If this is the last endpoint we unconfigured, than reset also
475 * the event buffers; unless unconfiguring the ep due to lpm,
476 * in which case the event buffer only gets reset during the
477 * block reset.
478 */
479 if (dbm_get_num_of_eps_configured(mdwc->dbm) == 0 &&
480 !dbm_reset_ep_after_lpm(mdwc->dbm))
481 dbm_event_buffer_config(mdwc->dbm, 0, 0, 0);
482
483 /*
484 * Call original complete function, notice that dwc->lock is already
485 * taken by the caller of this function (dwc3_gadget_giveback()).
486 */
487 request->complete = req_complete->orig_complete;
488 if (request->complete)
489 request->complete(ep, request);
490
491 kfree(req_complete);
492}
493
494
495/**
496* Helper function
497*
498* Reset DBM endpoint.
499*
500* @mdwc - pointer to dwc3_msm instance.
501* @dep - pointer to dwc3_ep instance.
502*
503* @return int - 0 on success, negative on error.
504*/
505static int __dwc3_msm_dbm_ep_reset(struct dwc3_msm *mdwc, struct dwc3_ep *dep)
506{
507 int ret;
508
509 dev_dbg(mdwc->dev, "Resetting dbm endpoint %d\n", dep->number);
510
511 /* Reset the dbm endpoint */
512 ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, true);
513 if (ret) {
514 dev_err(mdwc->dev, "%s: failed to assert dbm ep reset\n",
515 __func__);
516 return ret;
517 }
518
519 /*
520 * The necessary delay between asserting and deasserting the dbm ep
521 * reset is based on the number of active endpoints. If there is more
522 * than one endpoint, a 1 msec delay is required. Otherwise, a shorter
523 * delay will suffice.
524 */
525 if (dbm_get_num_of_eps_configured(mdwc->dbm) > 1)
526 usleep_range(1000, 1200);
527 else
528 udelay(10);
529 ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, false);
530 if (ret) {
531 dev_err(mdwc->dev, "%s: failed to deassert dbm ep reset\n",
532 __func__);
533 return ret;
534 }
535
536 return 0;
537}
538
539/**
540* Reset the DBM endpoint which is linked to the given USB endpoint.
541*
542* @usb_ep - pointer to usb_ep instance.
543*
544* @return int - 0 on success, negative on error.
545*/
546
547int msm_dwc3_reset_dbm_ep(struct usb_ep *ep)
548{
549 struct dwc3_ep *dep = to_dwc3_ep(ep);
550 struct dwc3 *dwc = dep->dwc;
551 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
552
553 return __dwc3_msm_dbm_ep_reset(mdwc, dep);
554}
555EXPORT_SYMBOL(msm_dwc3_reset_dbm_ep);
556
557
558/**
559* Helper function.
560* See the header of the dwc3_msm_ep_queue function.
561*
562* @dwc3_ep - pointer to dwc3_ep instance.
563* @req - pointer to dwc3_request instance.
564*
565* @return int - 0 on success, negative on error.
566*/
567static int __dwc3_msm_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
568{
569 struct dwc3_trb *trb;
570 struct dwc3_trb *trb_link;
571 struct dwc3_gadget_ep_cmd_params params;
572 u32 cmd;
573 int ret = 0;
574
Mayank Rana83ad5822016-08-09 14:17:22 -0700575 /* We push the request to the dep->started_list list to indicate that
Mayank Rana511f3b22016-08-02 12:00:11 -0700576 * this request is issued with start transfer. The request will be out
577 * from this list in 2 cases. The first is that the transfer will be
578 * completed (not if the transfer is endless using a circular TRBs with
579 * with link TRB). The second case is an option to do stop stransfer,
580 * this can be initiated by the function driver when calling dequeue.
581 */
Mayank Rana83ad5822016-08-09 14:17:22 -0700582 req->started = true;
583 list_add_tail(&req->list, &dep->started_list);
Mayank Rana511f3b22016-08-02 12:00:11 -0700584
585 /* First, prepare a normal TRB, point to the fake buffer */
Mayank Rana83ad5822016-08-09 14:17:22 -0700586 trb = &dep->trb_pool[dep->trb_enqueue & DWC3_TRB_NUM];
587 dep->trb_enqueue++;
Mayank Rana511f3b22016-08-02 12:00:11 -0700588 memset(trb, 0, sizeof(*trb));
589
590 req->trb = trb;
591 trb->bph = DBM_TRB_BIT | DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
592 trb->size = DWC3_TRB_SIZE_LENGTH(req->request.length);
593 trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_HWO |
594 DWC3_TRB_CTRL_CHN | (req->direction ? 0 : DWC3_TRB_CTRL_CSP);
595 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
596
597 /* Second, prepare a Link TRB that points to the first TRB*/
Mayank Rana83ad5822016-08-09 14:17:22 -0700598 trb_link = &dep->trb_pool[dep->trb_enqueue & DWC3_TRB_NUM];
599 dep->trb_enqueue++;
Mayank Rana511f3b22016-08-02 12:00:11 -0700600 memset(trb_link, 0, sizeof(*trb_link));
601
602 trb_link->bpl = lower_32_bits(req->trb_dma);
603 trb_link->bph = DBM_TRB_BIT |
604 DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
605 trb_link->size = 0;
606 trb_link->ctrl = DWC3_TRBCTL_LINK_TRB | DWC3_TRB_CTRL_HWO;
607
608 /*
609 * Now start the transfer
610 */
611 memset(&params, 0, sizeof(params));
612 params.param0 = 0; /* TDAddr High */
613 params.param1 = lower_32_bits(req->trb_dma); /* DAddr Low */
614
615 /* DBM requires IOC to be set */
616 cmd = DWC3_DEPCMD_STARTTRANSFER | DWC3_DEPCMD_CMDIOC;
Mayank Rana83ad5822016-08-09 14:17:22 -0700617 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -0700618 if (ret < 0) {
619 dev_dbg(dep->dwc->dev,
620 "%s: failed to send STARTTRANSFER command\n",
621 __func__);
622
623 list_del(&req->list);
624 return ret;
625 }
626 dep->flags |= DWC3_EP_BUSY;
Mayank Rana83ad5822016-08-09 14:17:22 -0700627 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700628
629 return ret;
630}
631
632/**
633* Queue a usb request to the DBM endpoint.
634* This function should be called after the endpoint
635* was enabled by the ep_enable.
636*
637* This function prepares special structure of TRBs which
638* is familiar with the DBM HW, so it will possible to use
639* this endpoint in DBM mode.
640*
641* The TRBs prepared by this function, is one normal TRB
642* which point to a fake buffer, followed by a link TRB
643* that points to the first TRB.
644*
645* The API of this function follow the regular API of
646* usb_ep_queue (see usb_ep_ops in include/linuk/usb/gadget.h).
647*
648* @usb_ep - pointer to usb_ep instance.
649* @request - pointer to usb_request instance.
650* @gfp_flags - possible flags.
651*
652* @return int - 0 on success, negative on error.
653*/
654static int dwc3_msm_ep_queue(struct usb_ep *ep,
655 struct usb_request *request, gfp_t gfp_flags)
656{
657 struct dwc3_request *req = to_dwc3_request(request);
658 struct dwc3_ep *dep = to_dwc3_ep(ep);
659 struct dwc3 *dwc = dep->dwc;
660 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
661 struct dwc3_msm_req_complete *req_complete;
662 unsigned long flags;
663 int ret = 0, size;
664 u8 bam_pipe;
665 bool producer;
666 bool disable_wb;
667 bool internal_mem;
668 bool ioc;
669 bool superspeed;
670
671 if (!(request->udc_priv & MSM_SPS_MODE)) {
672 /* Not SPS mode, call original queue */
673 dev_vdbg(mdwc->dev, "%s: not sps mode, use regular queue\n",
674 __func__);
675
676 return (mdwc->original_ep_ops[dep->number])->queue(ep,
677 request,
678 gfp_flags);
679 }
680
681 /* HW restriction regarding TRB size (8KB) */
682 if (req->request.length < 0x2000) {
683 dev_err(mdwc->dev, "%s: Min TRB size is 8KB\n", __func__);
684 return -EINVAL;
685 }
686
687 /*
688 * Override req->complete function, but before doing that,
689 * store it's original pointer in the req_complete_list.
690 */
691 req_complete = kzalloc(sizeof(*req_complete), gfp_flags);
692 if (!req_complete)
693 return -ENOMEM;
694
695 req_complete->req = request;
696 req_complete->orig_complete = request->complete;
697 list_add_tail(&req_complete->list_item, &mdwc->req_complete_list);
698 request->complete = dwc3_msm_req_complete_func;
699
700 /*
701 * Configure the DBM endpoint
702 */
703 bam_pipe = request->udc_priv & MSM_PIPE_ID_MASK;
704 producer = ((request->udc_priv & MSM_PRODUCER) ? true : false);
705 disable_wb = ((request->udc_priv & MSM_DISABLE_WB) ? true : false);
706 internal_mem = ((request->udc_priv & MSM_INTERNAL_MEM) ? true : false);
707 ioc = ((request->udc_priv & MSM_ETD_IOC) ? true : false);
708
709 ret = dbm_ep_config(mdwc->dbm, dep->number, bam_pipe, producer,
710 disable_wb, internal_mem, ioc);
711 if (ret < 0) {
712 dev_err(mdwc->dev,
713 "error %d after calling dbm_ep_config\n", ret);
714 return ret;
715 }
716
717 dev_vdbg(dwc->dev, "%s: queing request %p to ep %s length %d\n",
718 __func__, request, ep->name, request->length);
719 size = dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTSIZ(0));
720 dbm_event_buffer_config(mdwc->dbm,
721 dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRLO(0)),
722 dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRHI(0)),
723 DWC3_GEVNTSIZ_SIZE(size));
724
725 /*
726 * We must obtain the lock of the dwc3 core driver,
727 * including disabling interrupts, so we will be sure
728 * that we are the only ones that configure the HW device
729 * core and ensure that we queuing the request will finish
730 * as soon as possible so we will release back the lock.
731 */
732 spin_lock_irqsave(&dwc->lock, flags);
733 if (!dep->endpoint.desc) {
734 dev_err(mdwc->dev,
735 "%s: trying to queue request %p to disabled ep %s\n",
736 __func__, request, ep->name);
737 ret = -EPERM;
738 goto err;
739 }
740
741 if (dep->number == 0 || dep->number == 1) {
742 dev_err(mdwc->dev,
743 "%s: trying to queue dbm request %p to control ep %s\n",
744 __func__, request, ep->name);
745 ret = -EPERM;
746 goto err;
747 }
748
749
Mayank Rana83ad5822016-08-09 14:17:22 -0700750 if (dep->trb_dequeue != dep->trb_enqueue ||
751 !list_empty(&dep->pending_list)
752 || !list_empty(&dep->started_list)) {
Mayank Rana511f3b22016-08-02 12:00:11 -0700753 dev_err(mdwc->dev,
754 "%s: trying to queue dbm request %p tp ep %s\n",
755 __func__, request, ep->name);
756 ret = -EPERM;
757 goto err;
758 } else {
Mayank Rana83ad5822016-08-09 14:17:22 -0700759 dep->trb_dequeue = 0;
760 dep->trb_enqueue = 0;
Mayank Rana511f3b22016-08-02 12:00:11 -0700761 }
762
763 ret = __dwc3_msm_ep_queue(dep, req);
764 if (ret < 0) {
765 dev_err(mdwc->dev,
766 "error %d after calling __dwc3_msm_ep_queue\n", ret);
767 goto err;
768 }
769
770 spin_unlock_irqrestore(&dwc->lock, flags);
771 superspeed = dwc3_msm_is_dev_superspeed(mdwc);
772 dbm_set_speed(mdwc->dbm, (u8)superspeed);
773
774 return 0;
775
776err:
777 spin_unlock_irqrestore(&dwc->lock, flags);
778 kfree(req_complete);
779 return ret;
780}
781
782/*
783* Returns XferRscIndex for the EP. This is stored at StartXfer GSI EP OP
784*
785* @usb_ep - pointer to usb_ep instance.
786*
787* @return int - XferRscIndex
788*/
789static inline int gsi_get_xfer_index(struct usb_ep *ep)
790{
791 struct dwc3_ep *dep = to_dwc3_ep(ep);
792
793 return dep->resource_index;
794}
795
796/*
797* Fills up the GSI channel information needed in call to IPA driver
798* for GSI channel creation.
799*
800* @usb_ep - pointer to usb_ep instance.
801* @ch_info - output parameter with requested channel info
802*/
803static void gsi_get_channel_info(struct usb_ep *ep,
804 struct gsi_channel_info *ch_info)
805{
806 struct dwc3_ep *dep = to_dwc3_ep(ep);
807 int last_trb_index = 0;
808 struct dwc3 *dwc = dep->dwc;
809 struct usb_gsi_request *request = ch_info->ch_req;
810
811 /* Provide physical USB addresses for DEPCMD and GEVENTCNT registers */
812 ch_info->depcmd_low_addr = (u32)(dwc->reg_phys +
Mayank Rana83ad5822016-08-09 14:17:22 -0700813 DWC3_DEPCMD);
Mayank Rana511f3b22016-08-02 12:00:11 -0700814 ch_info->depcmd_hi_addr = 0;
815
816 ch_info->xfer_ring_base_addr = dwc3_trb_dma_offset(dep,
817 &dep->trb_pool[0]);
818 /* Convert to multipled of 1KB */
819 ch_info->const_buffer_size = request->buf_len/1024;
820
821 /* IN direction */
822 if (dep->direction) {
823 /*
824 * Multiply by size of each TRB for xfer_ring_len in bytes.
825 * 2n + 2 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
826 * extra Xfer TRB followed by n ZLP TRBs + 1 LINK TRB.
827 */
828 ch_info->xfer_ring_len = (2 * request->num_bufs + 2) * 0x10;
829 last_trb_index = 2 * request->num_bufs + 2;
830 } else { /* OUT direction */
831 /*
832 * Multiply by size of each TRB for xfer_ring_len in bytes.
833 * n + 1 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
834 * LINK TRB.
835 */
836 ch_info->xfer_ring_len = (request->num_bufs + 1) * 0x10;
837 last_trb_index = request->num_bufs + 1;
838 }
839
840 /* Store last 16 bits of LINK TRB address as per GSI hw requirement */
841 ch_info->last_trb_addr = (dwc3_trb_dma_offset(dep,
842 &dep->trb_pool[last_trb_index - 1]) & 0x0000FFFF);
843 ch_info->gevntcount_low_addr = (u32)(dwc->reg_phys +
844 DWC3_GEVNTCOUNT(ep->ep_intr_num));
845 ch_info->gevntcount_hi_addr = 0;
846
847 dev_dbg(dwc->dev,
848 "depcmd_laddr=%x last_trb_addr=%x gevtcnt_laddr=%x gevtcnt_haddr=%x",
849 ch_info->depcmd_low_addr, ch_info->last_trb_addr,
850 ch_info->gevntcount_low_addr, ch_info->gevntcount_hi_addr);
851}
852
853/*
854* Perform StartXfer on GSI EP. Stores XferRscIndex.
855*
856* @usb_ep - pointer to usb_ep instance.
857*
858* @return int - 0 on success
859*/
860static int gsi_startxfer_for_ep(struct usb_ep *ep)
861{
862 int ret;
863 struct dwc3_gadget_ep_cmd_params params;
864 u32 cmd;
865 struct dwc3_ep *dep = to_dwc3_ep(ep);
866 struct dwc3 *dwc = dep->dwc;
867
868 memset(&params, 0, sizeof(params));
869 params.param0 = GSI_TRB_ADDR_BIT_53_MASK | GSI_TRB_ADDR_BIT_55_MASK;
870 params.param0 |= (ep->ep_intr_num << 16);
871 params.param1 = lower_32_bits(dwc3_trb_dma_offset(dep,
872 &dep->trb_pool[0]));
873 cmd = DWC3_DEPCMD_STARTTRANSFER;
874 cmd |= DWC3_DEPCMD_PARAM(0);
Mayank Rana83ad5822016-08-09 14:17:22 -0700875 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -0700876
877 if (ret < 0)
878 dev_dbg(dwc->dev, "Fail StrtXfr on GSI EP#%d\n", dep->number);
Mayank Rana83ad5822016-08-09 14:17:22 -0700879 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700880 dev_dbg(dwc->dev, "XferRsc = %x", dep->resource_index);
881 return ret;
882}
883
884/*
885* Store Ring Base and Doorbell Address for GSI EP
886* for GSI channel creation.
887*
888* @usb_ep - pointer to usb_ep instance.
889* @dbl_addr - Doorbell address obtained from IPA driver
890*/
891static void gsi_store_ringbase_dbl_info(struct usb_ep *ep, u32 dbl_addr)
892{
893 struct dwc3_ep *dep = to_dwc3_ep(ep);
894 struct dwc3 *dwc = dep->dwc;
895 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
896 int n = ep->ep_intr_num - 1;
897
898 dwc3_msm_write_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n),
899 dwc3_trb_dma_offset(dep, &dep->trb_pool[0]));
900 dwc3_msm_write_reg(mdwc->base, GSI_DBL_ADDR_L(n), dbl_addr);
901
902 dev_dbg(mdwc->dev, "Ring Base Addr %d = %x", n,
903 dwc3_msm_read_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n)));
904 dev_dbg(mdwc->dev, "GSI DB Addr %d = %x", n,
905 dwc3_msm_read_reg(mdwc->base, GSI_DBL_ADDR_L(n)));
906}
907
908/*
909* Rings Doorbell for IN GSI Channel
910*
911* @usb_ep - pointer to usb_ep instance.
912* @request - pointer to GSI request. This is used to pass in the
913* address of the GSI doorbell obtained from IPA driver
914*/
915static void gsi_ring_in_db(struct usb_ep *ep, struct usb_gsi_request *request)
916{
917 void __iomem *gsi_dbl_address_lsb;
918 void __iomem *gsi_dbl_address_msb;
919 dma_addr_t offset;
920 u64 dbl_addr = *((u64 *)request->buf_base_addr);
921 u32 dbl_lo_addr = (dbl_addr & 0xFFFFFFFF);
922 u32 dbl_hi_addr = (dbl_addr >> 32);
923 u32 num_trbs = (request->num_bufs * 2 + 2);
924 struct dwc3_ep *dep = to_dwc3_ep(ep);
925 struct dwc3 *dwc = dep->dwc;
926 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
927
928 gsi_dbl_address_lsb = devm_ioremap_nocache(mdwc->dev,
929 dbl_lo_addr, sizeof(u32));
930 if (!gsi_dbl_address_lsb)
931 dev_dbg(mdwc->dev, "Failed to get GSI DBL address LSB\n");
932
933 gsi_dbl_address_msb = devm_ioremap_nocache(mdwc->dev,
934 dbl_hi_addr, sizeof(u32));
935 if (!gsi_dbl_address_msb)
936 dev_dbg(mdwc->dev, "Failed to get GSI DBL address MSB\n");
937
938 offset = dwc3_trb_dma_offset(dep, &dep->trb_pool[num_trbs-1]);
939 dev_dbg(mdwc->dev, "Writing link TRB addr: %pa to %p (%x)\n",
940 &offset, gsi_dbl_address_lsb, dbl_lo_addr);
941
942 writel_relaxed(offset, gsi_dbl_address_lsb);
943 writel_relaxed(0, gsi_dbl_address_msb);
944}
945
946/*
947* Sets HWO bit for TRBs and performs UpdateXfer for OUT EP.
948*
949* @usb_ep - pointer to usb_ep instance.
950* @request - pointer to GSI request. Used to determine num of TRBs for OUT EP.
951*
952* @return int - 0 on success
953*/
954static int gsi_updatexfer_for_ep(struct usb_ep *ep,
955 struct usb_gsi_request *request)
956{
957 int i;
958 int ret;
959 u32 cmd;
960 int num_trbs = request->num_bufs + 1;
961 struct dwc3_trb *trb;
962 struct dwc3_gadget_ep_cmd_params params;
963 struct dwc3_ep *dep = to_dwc3_ep(ep);
964 struct dwc3 *dwc = dep->dwc;
965
966 for (i = 0; i < num_trbs - 1; i++) {
967 trb = &dep->trb_pool[i];
968 trb->ctrl |= DWC3_TRB_CTRL_HWO;
969 }
970
971 memset(&params, 0, sizeof(params));
972 cmd = DWC3_DEPCMD_UPDATETRANSFER;
973 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
Mayank Rana83ad5822016-08-09 14:17:22 -0700974 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -0700975 dep->flags |= DWC3_EP_BUSY;
976 if (ret < 0)
977 dev_dbg(dwc->dev, "UpdateXfr fail on GSI EP#%d\n", dep->number);
978 return ret;
979}
980
981/*
982* Perform EndXfer on particular GSI EP.
983*
984* @usb_ep - pointer to usb_ep instance.
985*/
986static void gsi_endxfer_for_ep(struct usb_ep *ep)
987{
988 struct dwc3_ep *dep = to_dwc3_ep(ep);
989 struct dwc3 *dwc = dep->dwc;
990
991 dwc3_stop_active_transfer(dwc, dep->number, true);
992}
993
994/*
995* Allocates and configures TRBs for GSI EPs.
996*
997* @usb_ep - pointer to usb_ep instance.
998* @request - pointer to GSI request.
999*
1000* @return int - 0 on success
1001*/
1002static int gsi_prepare_trbs(struct usb_ep *ep, struct usb_gsi_request *req)
1003{
1004 int i = 0;
1005 dma_addr_t buffer_addr = req->dma;
1006 struct dwc3_ep *dep = to_dwc3_ep(ep);
1007 struct dwc3 *dwc = dep->dwc;
1008 struct dwc3_trb *trb;
1009 int num_trbs = (dep->direction) ? (2 * (req->num_bufs) + 2)
1010 : (req->num_bufs + 1);
1011
Jack Phambbe27962017-03-23 18:42:26 -07001012 dep->trb_dma_pool = dma_pool_create(ep->name, dwc->sysdev,
Mayank Rana511f3b22016-08-02 12:00:11 -07001013 num_trbs * sizeof(struct dwc3_trb),
1014 num_trbs * sizeof(struct dwc3_trb), 0);
1015 if (!dep->trb_dma_pool) {
1016 dev_err(dep->dwc->dev, "failed to alloc trb dma pool for %s\n",
1017 dep->name);
1018 return -ENOMEM;
1019 }
1020
1021 dep->num_trbs = num_trbs;
1022
1023 dep->trb_pool = dma_pool_alloc(dep->trb_dma_pool,
1024 GFP_KERNEL, &dep->trb_pool_dma);
1025 if (!dep->trb_pool) {
1026 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
1027 dep->name);
1028 return -ENOMEM;
1029 }
1030
1031 /* IN direction */
1032 if (dep->direction) {
1033 for (i = 0; i < num_trbs ; i++) {
1034 trb = &dep->trb_pool[i];
1035 memset(trb, 0, sizeof(*trb));
1036 /* Set up first n+1 TRBs for ZLPs */
1037 if (i < (req->num_bufs + 1)) {
1038 trb->bpl = 0;
1039 trb->bph = 0;
1040 trb->size = 0;
1041 trb->ctrl = DWC3_TRBCTL_NORMAL
1042 | DWC3_TRB_CTRL_IOC;
1043 continue;
1044 }
1045
1046 /* Setup n TRBs pointing to valid buffers */
1047 trb->bpl = lower_32_bits(buffer_addr);
1048 trb->bph = 0;
1049 trb->size = 0;
1050 trb->ctrl = DWC3_TRBCTL_NORMAL
1051 | DWC3_TRB_CTRL_IOC;
1052 buffer_addr += req->buf_len;
1053
1054 /* Set up the Link TRB at the end */
1055 if (i == (num_trbs - 1)) {
1056 trb->bpl = dwc3_trb_dma_offset(dep,
1057 &dep->trb_pool[0]);
1058 trb->bph = (1 << 23) | (1 << 21)
1059 | (ep->ep_intr_num << 16);
1060 trb->size = 0;
1061 trb->ctrl = DWC3_TRBCTL_LINK_TRB
1062 | DWC3_TRB_CTRL_HWO;
1063 }
1064 }
1065 } else { /* OUT direction */
1066
1067 for (i = 0; i < num_trbs ; i++) {
1068
1069 trb = &dep->trb_pool[i];
1070 memset(trb, 0, sizeof(*trb));
1071 trb->bpl = lower_32_bits(buffer_addr);
1072 trb->bph = 0;
1073 trb->size = req->buf_len;
1074 trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_IOC
1075 | DWC3_TRB_CTRL_CSP
1076 | DWC3_TRB_CTRL_ISP_IMI;
1077 buffer_addr += req->buf_len;
1078
1079 /* Set up the Link TRB at the end */
1080 if (i == (num_trbs - 1)) {
1081 trb->bpl = dwc3_trb_dma_offset(dep,
1082 &dep->trb_pool[0]);
1083 trb->bph = (1 << 23) | (1 << 21)
1084 | (ep->ep_intr_num << 16);
1085 trb->size = 0;
1086 trb->ctrl = DWC3_TRBCTL_LINK_TRB
1087 | DWC3_TRB_CTRL_HWO;
1088 }
1089 }
1090 }
1091 return 0;
1092}
1093
1094/*
1095* Frees TRBs for GSI EPs.
1096*
1097* @usb_ep - pointer to usb_ep instance.
1098*
1099*/
1100static void gsi_free_trbs(struct usb_ep *ep)
1101{
1102 struct dwc3_ep *dep = to_dwc3_ep(ep);
1103
1104 if (dep->endpoint.ep_type == EP_TYPE_NORMAL)
1105 return;
1106
1107 /* Free TRBs and TRB pool for EP */
1108 if (dep->trb_dma_pool) {
1109 dma_pool_free(dep->trb_dma_pool, dep->trb_pool,
1110 dep->trb_pool_dma);
1111 dma_pool_destroy(dep->trb_dma_pool);
1112 dep->trb_pool = NULL;
1113 dep->trb_pool_dma = 0;
1114 dep->trb_dma_pool = NULL;
1115 }
1116}
/*
 * Configures GSI EPs. For GSI EPs we need to set interrupter numbers.
 *
 * Programs DEPCFG for the endpoint (type, MPS, burst, EP number, GSI
 * event interrupter number, interrupt enables) and, if the EP is not
 * yet enabled, also programs DEPXFERCFG and sets the DALEPENA bit.
 *
 * @usb_ep - pointer to usb_ep instance.
 * @request - pointer to GSI request.
 */
static void gsi_configure_ep(struct usb_ep *ep, struct usb_gsi_request *request)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
	struct dwc3_gadget_ep_cmd_params params;
	const struct usb_endpoint_descriptor *desc = ep->desc;
	const struct usb_ss_ep_comp_descriptor *comp_desc = ep->comp_desc;
	u32 reg;

	memset(&params, 0x00, sizeof(params));

	/* Configure GSI EP */
	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));

	/* Burst size is only needed in SuperSpeed mode */
	if (dwc->gadget.speed == USB_SPEED_SUPER) {
		u32 burst = dep->endpoint.maxburst - 1;

		params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
	}

	/* Streams apply only to SS bulk EPs advertising max_streams */
	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
					| DWC3_DEPCFG_STREAM_EVENT_EN;
		dep->stream_capable = true;
	}

	/* Set EP number */
	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);

	/* Set interrupter number for GSI endpoints */
	params.param1 |= DWC3_DEPCFG_INT_NUM(ep->ep_intr_num);

	/* Enable XferInProgress and XferComplete Interrupts */
	params.param1 |= DWC3_DEPCFG_XFER_COMPLETE_EN;
	params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
	params.param1 |= DWC3_DEPCFG_FIFO_ERROR_EN;
	/*
	 * We must use the lower 16 TX FIFOs even though
	 * HW might have more
	 */
	/* Remove FIFO Number for GSI EP*/
	if (dep->direction)
		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);

	params.param0 |= DWC3_DEPCFG_ACTION_INIT;

	dev_dbg(mdwc->dev, "Set EP config to params = %x %x %x, for %s\n",
	params.param0, params.param1, params.param2, dep->name);

	dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);

	/* Set XferRsc Index for GSI EP */
	if (!(dep->flags & DWC3_EP_ENABLED)) {
		/* One transfer resource per EP, then mark it enabled */
		memset(&params, 0x00, sizeof(params));
		params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
		dwc3_send_gadget_ep_cmd(dep,
			DWC3_DEPCMD_SETTRANSFRESOURCE, &params);

		dep->endpoint.desc = desc;
		dep->comp_desc = comp_desc;
		dep->type = usb_endpoint_type(desc);
		dep->flags |= DWC3_EP_ENABLED;
		/* Ungate the endpoint in the device EP enable register */
		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
		reg |= DWC3_DALEPENA_EP(dep->number);
		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
	}

}
1194
/*
 * Enables USB wrapper for GSI
 *
 * Sequence: enable the GSI clock, pulse (set then clear) the restart
 * doorbell-pointer bit, then set the GSI enable bit. The order of these
 * register writes is part of the enable handshake.
 *
 * @usb_ep - pointer to usb_ep instance.
 */
static void gsi_enable(struct usb_ep *ep)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);

	/* Clock on first, before touching other GSI_GENERAL_CFG bits */
	dwc3_msm_write_reg_field(mdwc->base,
			GSI_GENERAL_CFG_REG, GSI_CLK_EN_MASK, 1);
	/* Pulse the restart-doorbell-pointer bit: set ... */
	dwc3_msm_write_reg_field(mdwc->base,
			GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 1);
	/* ... and clear */
	dwc3_msm_write_reg_field(mdwc->base,
			GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 0);
	dev_dbg(mdwc->dev, "%s: Enable GSI\n", __func__);
	/* Finally switch the GSI wrapper on */
	dwc3_msm_write_reg_field(mdwc->base,
			GSI_GENERAL_CFG_REG, GSI_EN_MASK, 1);
}
1216
1217/*
1218* Block or allow doorbell towards GSI
1219*
1220* @usb_ep - pointer to usb_ep instance.
1221* @request - pointer to GSI request. In this case num_bufs is used as a bool
1222* to set or clear the doorbell bit
1223*/
1224static void gsi_set_clear_dbell(struct usb_ep *ep,
1225 bool block_db)
1226{
1227
1228 struct dwc3_ep *dep = to_dwc3_ep(ep);
1229 struct dwc3 *dwc = dep->dwc;
1230 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1231
1232 dwc3_msm_write_reg_field(mdwc->base,
1233 GSI_GENERAL_CFG_REG, BLOCK_GSI_WR_GO_MASK, block_db);
1234}
1235
/*
 * Performs necessary checks before stopping GSI channels
 *
 * Polls until the GSI write-control state machine is idle, then (unless
 * handling a function suspend) verifies the link is in U3.
 *
 * @usb_ep - pointer to usb_ep instance to access DWC3 regs
 * @f_suspend - true when called for a USB function suspend, in which
 *              case the U3 link-state check is skipped.
 *
 * @return bool - true if it is safe to suspend the GSI channels.
 */
static bool gsi_check_ready_to_suspend(struct usb_ep *ep, bool f_suspend)
{
	/*
	 * NOTE(review): this is a tight busy-wait with no delay between
	 * polls, so "1500" is an iteration count, not a time bound —
	 * confirm this is intentional for this register.
	 */
	u32 timeout = 1500;
	u32 reg = 0;
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);

	/* Wait for the GSI write-control state machine to go idle (0) */
	while (dwc3_msm_read_reg_field(mdwc->base,
		GSI_IF_STS, GSI_WR_CTRL_STATE_MASK)) {
		if (!timeout--) {
			dev_err(mdwc->dev,
			"Unable to suspend GSI ch. WR_CTRL_STATE != 0\n");
			return false;
		}
	}
	/* Check for U3 only if we are not handling Function Suspend */
	if (!f_suspend) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
		if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U3) {
			dev_err(mdwc->dev, "Unable to suspend GSI ch\n");
			return false;
		}
	}

	return true;
}
1268
1269
1270/**
1271* Performs GSI operations or GSI EP related operations.
1272*
1273* @usb_ep - pointer to usb_ep instance.
1274* @op_data - pointer to opcode related data.
1275* @op - GSI related or GSI EP related op code.
1276*
1277* @return int - 0 on success, negative on error.
1278* Also returns XferRscIdx for GSI_EP_OP_GET_XFER_IDX.
1279*/
1280static int dwc3_msm_gsi_ep_op(struct usb_ep *ep,
1281 void *op_data, enum gsi_ep_op op)
1282{
1283 u32 ret = 0;
1284 struct dwc3_ep *dep = to_dwc3_ep(ep);
1285 struct dwc3 *dwc = dep->dwc;
1286 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1287 struct usb_gsi_request *request;
1288 struct gsi_channel_info *ch_info;
1289 bool block_db, f_suspend;
Mayank Rana8432c362016-09-30 18:41:17 -07001290 unsigned long flags;
Mayank Rana511f3b22016-08-02 12:00:11 -07001291
1292 switch (op) {
1293 case GSI_EP_OP_PREPARE_TRBS:
1294 request = (struct usb_gsi_request *)op_data;
1295 dev_dbg(mdwc->dev, "EP_OP_PREPARE_TRBS for %s\n", ep->name);
1296 ret = gsi_prepare_trbs(ep, request);
1297 break;
1298 case GSI_EP_OP_FREE_TRBS:
1299 dev_dbg(mdwc->dev, "EP_OP_FREE_TRBS for %s\n", ep->name);
1300 gsi_free_trbs(ep);
1301 break;
1302 case GSI_EP_OP_CONFIG:
1303 request = (struct usb_gsi_request *)op_data;
1304 dev_dbg(mdwc->dev, "EP_OP_CONFIG for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001305 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001306 gsi_configure_ep(ep, request);
Mayank Rana8432c362016-09-30 18:41:17 -07001307 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001308 break;
1309 case GSI_EP_OP_STARTXFER:
1310 dev_dbg(mdwc->dev, "EP_OP_STARTXFER for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001311 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001312 ret = gsi_startxfer_for_ep(ep);
Mayank Rana8432c362016-09-30 18:41:17 -07001313 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001314 break;
1315 case GSI_EP_OP_GET_XFER_IDX:
1316 dev_dbg(mdwc->dev, "EP_OP_GET_XFER_IDX for %s\n", ep->name);
1317 ret = gsi_get_xfer_index(ep);
1318 break;
1319 case GSI_EP_OP_STORE_DBL_INFO:
1320 dev_dbg(mdwc->dev, "EP_OP_STORE_DBL_INFO\n");
1321 gsi_store_ringbase_dbl_info(ep, *((u32 *)op_data));
1322 break;
1323 case GSI_EP_OP_ENABLE_GSI:
1324 dev_dbg(mdwc->dev, "EP_OP_ENABLE_GSI\n");
1325 gsi_enable(ep);
1326 break;
1327 case GSI_EP_OP_GET_CH_INFO:
1328 ch_info = (struct gsi_channel_info *)op_data;
1329 gsi_get_channel_info(ep, ch_info);
1330 break;
1331 case GSI_EP_OP_RING_IN_DB:
1332 request = (struct usb_gsi_request *)op_data;
1333 dev_dbg(mdwc->dev, "RING IN EP DB\n");
1334 gsi_ring_in_db(ep, request);
1335 break;
1336 case GSI_EP_OP_UPDATEXFER:
1337 request = (struct usb_gsi_request *)op_data;
1338 dev_dbg(mdwc->dev, "EP_OP_UPDATEXFER\n");
Mayank Rana8432c362016-09-30 18:41:17 -07001339 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001340 ret = gsi_updatexfer_for_ep(ep, request);
Mayank Rana8432c362016-09-30 18:41:17 -07001341 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001342 break;
1343 case GSI_EP_OP_ENDXFER:
1344 request = (struct usb_gsi_request *)op_data;
1345 dev_dbg(mdwc->dev, "EP_OP_ENDXFER for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001346 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001347 gsi_endxfer_for_ep(ep);
Mayank Rana8432c362016-09-30 18:41:17 -07001348 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001349 break;
1350 case GSI_EP_OP_SET_CLR_BLOCK_DBL:
1351 block_db = *((bool *)op_data);
1352 dev_dbg(mdwc->dev, "EP_OP_SET_CLR_BLOCK_DBL %d\n",
1353 block_db);
1354 gsi_set_clear_dbell(ep, block_db);
1355 break;
1356 case GSI_EP_OP_CHECK_FOR_SUSPEND:
1357 dev_dbg(mdwc->dev, "EP_OP_CHECK_FOR_SUSPEND\n");
1358 f_suspend = *((bool *)op_data);
1359 ret = gsi_check_ready_to_suspend(ep, f_suspend);
1360 break;
1361 case GSI_EP_OP_DISABLE:
1362 dev_dbg(mdwc->dev, "EP_OP_DISABLE\n");
1363 ret = ep->ops->disable(ep);
1364 break;
1365 default:
1366 dev_err(mdwc->dev, "%s: Invalid opcode GSI EP\n", __func__);
1367 }
1368
1369 return ret;
1370}
1371
1372/**
1373 * Configure MSM endpoint.
1374 * This function do specific configurations
1375 * to an endpoint which need specific implementaion
1376 * in the MSM architecture.
1377 *
1378 * This function should be called by usb function/class
1379 * layer which need a support from the specific MSM HW
1380 * which wrap the USB3 core. (like GSI or DBM specific endpoints)
1381 *
1382 * @ep - a pointer to some usb_ep instance
1383 *
1384 * @return int - 0 on success, negetive on error.
1385 */
1386int msm_ep_config(struct usb_ep *ep)
1387{
1388 struct dwc3_ep *dep = to_dwc3_ep(ep);
1389 struct dwc3 *dwc = dep->dwc;
1390 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1391 struct usb_ep_ops *new_ep_ops;
1392
1393
1394 /* Save original ep ops for future restore*/
1395 if (mdwc->original_ep_ops[dep->number]) {
1396 dev_err(mdwc->dev,
1397 "ep [%s,%d] already configured as msm endpoint\n",
1398 ep->name, dep->number);
1399 return -EPERM;
1400 }
1401 mdwc->original_ep_ops[dep->number] = ep->ops;
1402
1403 /* Set new usb ops as we like */
1404 new_ep_ops = kzalloc(sizeof(struct usb_ep_ops), GFP_ATOMIC);
1405 if (!new_ep_ops)
1406 return -ENOMEM;
1407
1408 (*new_ep_ops) = (*ep->ops);
1409 new_ep_ops->queue = dwc3_msm_ep_queue;
1410 new_ep_ops->gsi_ep_op = dwc3_msm_gsi_ep_op;
1411 ep->ops = new_ep_ops;
1412
1413 /*
1414 * Do HERE more usb endpoint configurations
1415 * which are specific to MSM.
1416 */
1417
1418 return 0;
1419}
1420EXPORT_SYMBOL(msm_ep_config);
1421
1422/**
1423 * Un-configure MSM endpoint.
1424 * Tear down configurations done in the
1425 * dwc3_msm_ep_config function.
1426 *
1427 * @ep - a pointer to some usb_ep instance
1428 *
1429 * @return int - 0 on success, negative on error.
1430 */
1431int msm_ep_unconfig(struct usb_ep *ep)
1432{
1433 struct dwc3_ep *dep = to_dwc3_ep(ep);
1434 struct dwc3 *dwc = dep->dwc;
1435 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1436 struct usb_ep_ops *old_ep_ops;
1437
1438 /* Restore original ep ops */
1439 if (!mdwc->original_ep_ops[dep->number]) {
1440 dev_err(mdwc->dev,
1441 "ep [%s,%d] was not configured as msm endpoint\n",
1442 ep->name, dep->number);
1443 return -EINVAL;
1444 }
1445 old_ep_ops = (struct usb_ep_ops *)ep->ops;
1446 ep->ops = mdwc->original_ep_ops[dep->number];
1447 mdwc->original_ep_ops[dep->number] = NULL;
1448 kfree(old_ep_ops);
1449
1450 /*
1451 * Do HERE more usb endpoint un-configurations
1452 * which are specific to MSM.
1453 */
1454
1455 return 0;
1456}
1457EXPORT_SYMBOL(msm_ep_unconfig);
1458#endif /* (CONFIG_USB_DWC3_GADGET) || (CONFIG_USB_DWC3_DUAL_ROLE) */
1459
1460static void dwc3_resume_work(struct work_struct *w);
1461
/*
 * Worker that force-restarts an active USB session after a controller
 * error: it simulates a disconnect, waits for the device to runtime
 * suspend, then simulates a reconnect if VBUS is still present.
 */
static void dwc3_restart_usb_work(struct work_struct *w)
{
	struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
						restart_usb_work);
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	/* up to 50 * 20ms = ~1s waiting for runtime suspend */
	unsigned int timeout = 50;

	dev_dbg(mdwc->dev, "%s\n", __func__);

	/* Restart only makes sense with an active dual-role controller */
	if (atomic_read(&dwc->in_lpm) || !dwc->is_drd) {
		dev_dbg(mdwc->dev, "%s failed!!!\n", __func__);
		return;
	}

	/* guard against concurrent VBUS handling */
	mdwc->in_restart = true;

	if (!mdwc->vbus_active) {
		dev_dbg(mdwc->dev, "%s bailing out in disconnect\n", __func__);
		dwc->err_evt_seen = false;
		mdwc->in_restart = false;
		return;
	}

	dbg_event(0xFF, "RestartUSB", 0);
	/* Reset active USB connection */
	dwc3_resume_work(&mdwc->resume_work);

	/* Make sure disconnect is processed before sending connect */
	while (--timeout && !pm_runtime_suspended(mdwc->dev))
		msleep(20);

	if (!timeout) {
		dev_dbg(mdwc->dev,
			"Not in LPM after disconnect, forcing suspend...\n");
		dbg_event(0xFF, "ReStart:RT SUSP",
			atomic_read(&mdwc->dev->power.usage_count));
		pm_runtime_suspend(mdwc->dev);
	}

	mdwc->in_restart = false;
	/* Force reconnect only if cable is still connected */
	if (mdwc->vbus_active)
		dwc3_resume_work(&mdwc->resume_work);

	dwc->err_evt_seen = false;
	flush_delayed_work(&mdwc->sm_work);
}
1510
/*
 * USB bus notifier callback: kicks the state-machine work to recover
 * the host controller when the USB core reports the HC died.
 *
 * @return int - always 0 (notifier "done" semantics).
 */
static int msm_dwc3_usbdev_notify(struct notifier_block *self,
			unsigned long action, void *priv)
{
	struct dwc3_msm *mdwc = container_of(self, struct dwc3_msm, usbdev_nb);
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	struct usb_bus *bus = priv;

	/* Interested only in recovery when HC dies */
	if (action != USB_BUS_DIED)
		return 0;

	dev_dbg(mdwc->dev, "%s initiate recovery from hc_died\n", __func__);
	/* Recovery already under process */
	if (mdwc->hc_died)
		return 0;

	/* Ignore events for a HCD that is not our xhci child */
	if (bus->controller != &dwc->xhci->dev) {
		dev_dbg(mdwc->dev, "%s event for diff HCD\n", __func__);
		return 0;
	}

	/* Flag recovery in progress and run the OTG state machine now */
	mdwc->hc_died = true;
	schedule_delayed_work(&mdwc->sm_work, 0);
	return 0;
}
1536
1537
Mayank Rana511f3b22016-08-02 12:00:11 -07001538/*
1539 * Check whether the DWC3 requires resetting the ep
1540 * after going to Low Power Mode (lpm)
1541 */
1542bool msm_dwc3_reset_ep_after_lpm(struct usb_gadget *gadget)
1543{
1544 struct dwc3 *dwc = container_of(gadget, struct dwc3, gadget);
1545 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1546
1547 return dbm_reset_ep_after_lpm(mdwc->dbm);
1548}
1549EXPORT_SYMBOL(msm_dwc3_reset_ep_after_lpm);
1550
1551/*
1552 * Config Global Distributed Switch Controller (GDSC)
1553 * to support controller power collapse
1554 */
1555static int dwc3_msm_config_gdsc(struct dwc3_msm *mdwc, int on)
1556{
1557 int ret;
1558
1559 if (IS_ERR_OR_NULL(mdwc->dwc3_gdsc))
1560 return -EPERM;
1561
1562 if (on) {
1563 ret = regulator_enable(mdwc->dwc3_gdsc);
1564 if (ret) {
1565 dev_err(mdwc->dev, "unable to enable usb3 gdsc\n");
1566 return ret;
1567 }
1568 } else {
1569 ret = regulator_disable(mdwc->dwc3_gdsc);
1570 if (ret) {
1571 dev_err(mdwc->dev, "unable to disable usb3 gdsc\n");
1572 return ret;
1573 }
1574 }
1575
1576 return ret;
1577}
1578
/*
 * Assert or deassert the controller core block reset.
 *
 * @assert - true: gate all core clocks then assert the reset;
 *           false: deassert the reset then re-enable the clocks.
 * The clock/reset ordering in each branch is intentional; the power
 * event IRQ is masked across the reset so spurious events are not seen.
 *
 * @return int - 0 on success, reset-controller error code on failure.
 */
static int dwc3_msm_link_clk_reset(struct dwc3_msm *mdwc, bool assert)
{
	int ret = 0;

	if (assert) {
		disable_irq(mdwc->pwr_event_irq);
		/* Using asynchronous block reset to the hardware */
		dev_dbg(mdwc->dev, "block_reset ASSERT\n");
		/* Clocks off before asserting reset */
		clk_disable_unprepare(mdwc->utmi_clk);
		clk_disable_unprepare(mdwc->sleep_clk);
		clk_disable_unprepare(mdwc->core_clk);
		clk_disable_unprepare(mdwc->iface_clk);
		ret = reset_control_assert(mdwc->core_reset);
		if (ret)
			dev_err(mdwc->dev, "dwc3 core_reset assert failed\n");
	} else {
		dev_dbg(mdwc->dev, "block_reset DEASSERT\n");
		ret = reset_control_deassert(mdwc->core_reset);
		if (ret)
			dev_err(mdwc->dev, "dwc3 core_reset deassert failed\n");
		/* brief settle time after deassert before clocks return */
		ndelay(200);
		clk_prepare_enable(mdwc->iface_clk);
		clk_prepare_enable(mdwc->core_clk);
		clk_prepare_enable(mdwc->sleep_clk);
		clk_prepare_enable(mdwc->utmi_clk);
		enable_irq(mdwc->pwr_event_irq);
	}

	return ret;
}
1609
/*
 * Program the controller's reference-clock period (GUCTL) and frame
 * length adjustment (GFLADJ) to match the board's UTMI clock rate.
 * The per-rate constants come from the SNPS databook (Table 6-55).
 */
static void dwc3_msm_update_ref_clk(struct dwc3_msm *mdwc)
{
	u32 guctl, gfladj = 0;

	guctl = dwc3_msm_read_reg(mdwc->base, DWC3_GUCTL);
	guctl &= ~DWC3_GUCTL_REFCLKPER;

	/* GFLADJ register is used starting with revision 2.50a */
	if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) >= DWC3_REVISION_250A) {
		gfladj = dwc3_msm_read_reg(mdwc->base, DWC3_GFLADJ);
		gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
		gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZ_DECR;
		gfladj &= ~DWC3_GFLADJ_REFCLK_LPM_SEL;
		gfladj &= ~DWC3_GFLADJ_REFCLK_FLADJ;
	}

	/* Refer to SNPS Databook Table 6-55 for calculations used */
	switch (mdwc->utmi_clk_rate) {
	case 19200000:
		guctl |= 52 << __ffs(DWC3_GUCTL_REFCLKPER);
		gfladj |= 12 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
		gfladj |= DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
		gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
		gfladj |= 200 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
		break;
	case 24000000:
		guctl |= 41 << __ffs(DWC3_GUCTL_REFCLKPER);
		gfladj |= 10 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
		gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
		gfladj |= 2032 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
		break;
	default:
		dev_warn(mdwc->dev, "Unsupported utmi_clk_rate: %u\n",
				mdwc->utmi_clk_rate);
		break;
	}

	dwc3_msm_write_reg(mdwc->base, DWC3_GUCTL, guctl);
	/* gfladj stays 0 (nothing to write) on cores older than 2.50a */
	if (gfladj)
		dwc3_msm_write_reg(mdwc->base, DWC3_GFLADJ, gfladj);
}
1651
/* Initialize QSCRATCH registers for HSPHY and SSPHY operation */
static void dwc3_msm_qscratch_reg_init(struct dwc3_msm *mdwc)
{
	if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) < DWC3_REVISION_250A)
		/* On older cores set XHCI_REV bit to specify revision 1.0 */
		dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
					 BIT(2), 1);

	/*
	 * Enable master clock for RAMs to allow BAM to access RAMs when
	 * RAM clock gating is enabled via DWC3's GCTL. Otherwise issues
	 * are seen where RAM clocks get turned OFF in SS mode
	 */
	dwc3_msm_write_reg(mdwc->base, CGCTL_REG,
		dwc3_msm_read_reg(mdwc->base, CGCTL_REG) | 0x18);

}
1669
Jack Pham4b8b4ae2016-08-09 11:36:34 -07001670static void dwc3_msm_vbus_draw_work(struct work_struct *w)
1671{
1672 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
1673 vbus_draw_work);
1674 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1675
1676 dwc3_msm_gadget_vbus_draw(mdwc, dwc->vbus_draw);
1677}
1678
Mayank Rana511f3b22016-08-02 12:00:11 -07001679static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event)
1680{
1681 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
Mayank Ranaf4918d32016-12-15 13:35:55 -08001682 struct dwc3_event_buffer *evt;
Mayank Rana511f3b22016-08-02 12:00:11 -07001683 u32 reg;
Mayank Ranaf4918d32016-12-15 13:35:55 -08001684 int i;
Mayank Rana511f3b22016-08-02 12:00:11 -07001685
1686 switch (event) {
1687 case DWC3_CONTROLLER_ERROR_EVENT:
1688 dev_info(mdwc->dev,
1689 "DWC3_CONTROLLER_ERROR_EVENT received, irq cnt %lu\n",
1690 dwc->irq_cnt);
1691
1692 dwc3_gadget_disable_irq(dwc);
1693
1694 /* prevent core from generating interrupts until recovery */
1695 reg = dwc3_msm_read_reg(mdwc->base, DWC3_GCTL);
1696 reg |= DWC3_GCTL_CORESOFTRESET;
1697 dwc3_msm_write_reg(mdwc->base, DWC3_GCTL, reg);
1698
1699 /* restart USB which performs full reset and reconnect */
1700 schedule_work(&mdwc->restart_usb_work);
1701 break;
1702 case DWC3_CONTROLLER_RESET_EVENT:
1703 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESET_EVENT received\n");
1704 /* HS & SSPHYs get reset as part of core soft reset */
1705 dwc3_msm_qscratch_reg_init(mdwc);
1706 break;
1707 case DWC3_CONTROLLER_POST_RESET_EVENT:
1708 dev_dbg(mdwc->dev,
1709 "DWC3_CONTROLLER_POST_RESET_EVENT received\n");
1710
1711 /*
1712 * Below sequence is used when controller is working without
1713 * having ssphy and only USB high speed is supported.
1714 */
1715 if (dwc->maximum_speed == USB_SPEED_HIGH) {
1716 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1717 dwc3_msm_read_reg(mdwc->base,
1718 QSCRATCH_GENERAL_CFG)
1719 | PIPE_UTMI_CLK_DIS);
1720
1721 usleep_range(2, 5);
1722
1723
1724 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1725 dwc3_msm_read_reg(mdwc->base,
1726 QSCRATCH_GENERAL_CFG)
1727 | PIPE_UTMI_CLK_SEL
1728 | PIPE3_PHYSTATUS_SW);
1729
1730 usleep_range(2, 5);
1731
1732 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1733 dwc3_msm_read_reg(mdwc->base,
1734 QSCRATCH_GENERAL_CFG)
1735 & ~PIPE_UTMI_CLK_DIS);
1736 }
1737
1738 dwc3_msm_update_ref_clk(mdwc);
1739 dwc->tx_fifo_size = mdwc->tx_fifo_size;
1740 break;
1741 case DWC3_CONTROLLER_CONNDONE_EVENT:
1742 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_CONNDONE_EVENT received\n");
1743 /*
1744 * Add power event if the dbm indicates coming out of L1 by
1745 * interrupt
1746 */
1747 if (mdwc->dbm && dbm_l1_lpm_interrupt(mdwc->dbm))
1748 dwc3_msm_write_reg_field(mdwc->base,
1749 PWR_EVNT_IRQ_MASK_REG,
1750 PWR_EVNT_LPM_OUT_L1_MASK, 1);
1751
1752 atomic_set(&dwc->in_lpm, 0);
1753 break;
1754 case DWC3_CONTROLLER_NOTIFY_OTG_EVENT:
1755 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_NOTIFY_OTG_EVENT received\n");
1756 if (dwc->enable_bus_suspend) {
1757 mdwc->suspend = dwc->b_suspend;
1758 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
1759 }
1760 break;
1761 case DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT:
1762 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT received\n");
Jack Pham4b8b4ae2016-08-09 11:36:34 -07001763 schedule_work(&mdwc->vbus_draw_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07001764 break;
1765 case DWC3_CONTROLLER_RESTART_USB_SESSION:
1766 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESTART_USB_SESSION received\n");
Hemant Kumar43874172016-08-25 16:17:48 -07001767 schedule_work(&mdwc->restart_usb_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07001768 break;
Mayank Ranaf4918d32016-12-15 13:35:55 -08001769 case DWC3_GSI_EVT_BUF_ALLOC:
1770 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_ALLOC\n");
1771
1772 if (!mdwc->num_gsi_event_buffers)
1773 break;
1774
1775 mdwc->gsi_ev_buff = devm_kzalloc(dwc->dev,
1776 sizeof(*dwc->ev_buf) * mdwc->num_gsi_event_buffers,
1777 GFP_KERNEL);
1778 if (!mdwc->gsi_ev_buff) {
1779 dev_err(dwc->dev, "can't allocate gsi_ev_buff\n");
1780 break;
1781 }
1782
1783 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1784
1785 evt = devm_kzalloc(dwc->dev, sizeof(*evt), GFP_KERNEL);
1786 if (!evt)
1787 break;
1788 evt->dwc = dwc;
1789 evt->length = DWC3_EVENT_BUFFERS_SIZE;
1790 evt->buf = dma_alloc_coherent(dwc->dev,
1791 DWC3_EVENT_BUFFERS_SIZE,
1792 &evt->dma, GFP_KERNEL);
1793 if (!evt->buf) {
1794 dev_err(dwc->dev,
1795 "can't allocate gsi_evt_buf(%d)\n", i);
1796 break;
1797 }
1798 mdwc->gsi_ev_buff[i] = evt;
1799 }
1800 break;
1801 case DWC3_GSI_EVT_BUF_SETUP:
1802 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_SETUP\n");
1803 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1804 evt = mdwc->gsi_ev_buff[i];
1805 dev_dbg(mdwc->dev, "Evt buf %p dma %08llx length %d\n",
1806 evt->buf, (unsigned long long) evt->dma,
1807 evt->length);
1808 memset(evt->buf, 0, evt->length);
1809 evt->lpos = 0;
1810 /*
1811 * Primary event buffer is programmed with registers
1812 * DWC3_GEVNT*(0). Hence use DWC3_GEVNT*(i+1) to
1813 * program USB GSI related event buffer with DWC3
1814 * controller.
1815 */
1816 dwc3_writel(dwc->regs, DWC3_GEVNTADRLO((i+1)),
1817 lower_32_bits(evt->dma));
1818 dwc3_writel(dwc->regs, DWC3_GEVNTADRHI((i+1)),
1819 DWC3_GEVNTADRHI_EVNTADRHI_GSI_EN(
1820 DWC3_GEVENT_TYPE_GSI) |
1821 DWC3_GEVNTADRHI_EVNTADRHI_GSI_IDX((i+1)));
1822 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ((i+1)),
1823 DWC3_GEVNTCOUNT_EVNTINTRPTMASK |
1824 ((evt->length) & 0xffff));
1825 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT((i+1)), 0);
1826 }
1827 break;
1828 case DWC3_GSI_EVT_BUF_CLEANUP:
1829 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_CLEANUP\n");
1830 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1831 evt = mdwc->gsi_ev_buff[i];
1832 evt->lpos = 0;
1833 /*
1834 * Primary event buffer is programmed with registers
1835 * DWC3_GEVNT*(0). Hence use DWC3_GEVNT*(i+1) to
1836 * program USB GSI related event buffer with DWC3
1837 * controller.
1838 */
1839 dwc3_writel(dwc->regs, DWC3_GEVNTADRLO((i+1)), 0);
1840 dwc3_writel(dwc->regs, DWC3_GEVNTADRHI((i+1)), 0);
1841 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ((i+1)),
1842 DWC3_GEVNTSIZ_INTMASK |
1843 DWC3_GEVNTSIZ_SIZE((i+1)));
1844 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT((i+1)), 0);
1845 }
1846 break;
1847 case DWC3_GSI_EVT_BUF_FREE:
1848 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_FREE\n");
1849 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1850 evt = mdwc->gsi_ev_buff[i];
1851 if (evt)
1852 dma_free_coherent(dwc->dev, evt->length,
1853 evt->buf, evt->dma);
1854 }
1855 break;
Mayank Rana511f3b22016-08-02 12:00:11 -07001856 default:
1857 dev_dbg(mdwc->dev, "unknown dwc3 event\n");
1858 break;
1859 }
1860}
1861
/*
 * Perform a block reset of the controller and, when present, the DBM.
 *
 * @core_reset - when true, first assert/deassert the core link clock
 *               reset (with settle delays) before resetting the DBM.
 */
static void dwc3_msm_block_reset(struct dwc3_msm *mdwc, bool core_reset)
{
	int ret = 0;

	if (core_reset) {
		ret = dwc3_msm_link_clk_reset(mdwc, 1);
		if (ret)
			return;

		usleep_range(1000, 1200);
		ret = dwc3_msm_link_clk_reset(mdwc, 0);
		if (ret)
			return;

		/* allow the core to settle after reset deassert */
		usleep_range(10000, 12000);
	}

	if (mdwc->dbm) {
		/* Reset the DBM */
		dbm_soft_reset(mdwc->dbm, 1);
		usleep_range(1000, 1200);
		dbm_soft_reset(mdwc->dbm, 0);

		/*enable DBM*/
		dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
			DBM_EN_MASK, 0x1);
		dbm_enable(mdwc->dbm);
	}
}
1891
/*
 * Re-initialize the controller after a power collapse (power-on reset):
 * configure the AHB2PHY bridge wait states, run one-time core pre-init,
 * then core init and event buffer setup.
 */
static void dwc3_msm_power_collapse_por(struct dwc3_msm *mdwc)
{
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	u32 val;
	int ret;

	/* Configure AHB2PHY for one wait state read/write */
	if (mdwc->ahb2phy_base) {
		clk_prepare_enable(mdwc->cfg_ahb_clk);
		val = readl_relaxed(mdwc->ahb2phy_base +
				PERIPH_SS_AHB2PHY_TOP_CFG);
		/* only write when the register is not already programmed */
		if (val != ONE_READ_WRITE_WAIT) {
			writel_relaxed(ONE_READ_WRITE_WAIT,
				mdwc->ahb2phy_base + PERIPH_SS_AHB2PHY_TOP_CFG);
			/* complete above write before configuring USB PHY. */
			mb();
		}
		clk_disable_unprepare(mdwc->cfg_ahb_clk);
	}

	/* One-time core pre-init on the very first power-up */
	if (!mdwc->init) {
		dbg_event(0xFF, "dwc3 init",
				atomic_read(&mdwc->dev->power.usage_count));
		ret = dwc3_core_pre_init(dwc);
		if (ret) {
			dev_err(mdwc->dev, "dwc3_core_pre_init failed\n");
			return;
		}
		mdwc->init = true;
	}

	dwc3_core_init(dwc);
	/* Re-configure event buffers */
	dwc3_event_buffers_setup(dwc);
}
1927
/*
 * dwc3_msm_prepare_suspend - gate conditions and PHY handshake before LPM.
 *
 * Aborts (-EBUSY) if a SuperSpeed session is active but the link is not
 * parked in P3.  Otherwise arms HS PHY auto-suspend via GUSB2PHYCFG and
 * polls (up to 5 ms) for the PHY to report L2 entry in the power event
 * status register.  Returns 0 even if L2 is not reached; the failure is
 * only logged.
 */
static int dwc3_msm_prepare_suspend(struct dwc3_msm *mdwc)
{
	unsigned long timeout;
	u32 reg = 0;

	if ((mdwc->in_host_mode || mdwc->vbus_active)
		&& dwc3_msm_is_superspeed(mdwc) && !mdwc->in_restart) {
		if (!atomic_read(&mdwc->in_p3)) {
			dev_err(mdwc->dev, "Not in P3,aborting LPM sequence\n");
			return -EBUSY;
		}
	}

	/* Clear previous L2 events */
	dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
		PWR_EVNT_LPM_IN_L2_MASK | PWR_EVNT_LPM_OUT_L2_MASK);

	/* Prepare HSPHY for suspend */
	reg = dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0));
	dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
		reg | DWC3_GUSB2PHYCFG_ENBLSLPM | DWC3_GUSB2PHYCFG_SUSPHY);

	/* Wait for PHY to go into L2 */
	timeout = jiffies + msecs_to_jiffies(5);
	while (!time_after(jiffies, timeout)) {
		reg = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
		if (reg & PWR_EVNT_LPM_IN_L2_MASK)
			break;
	}
	/* best-effort: log but do not fail if L2 was never observed */
	if (!(reg & PWR_EVNT_LPM_IN_L2_MASK))
		dev_err(mdwc->dev, "could not transition HS PHY to L2\n");

	/* Clear L2 event bit */
	dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
		PWR_EVNT_LPM_IN_L2_MASK);

	return 0;
}
1966
1967static void dwc3_msm_bus_vote_w(struct work_struct *w)
1968{
1969 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, bus_vote_w);
1970 int ret;
1971
1972 ret = msm_bus_scale_client_update_request(mdwc->bus_perf_client,
1973 mdwc->bus_vote);
1974 if (ret)
1975 dev_err(mdwc->dev, "Failed to reset bus bw vote %d\n", ret);
1976}
1977
1978static void dwc3_set_phy_speed_flags(struct dwc3_msm *mdwc)
1979{
1980 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1981 int i, num_ports;
1982 u32 reg;
1983
1984 mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
1985 if (mdwc->in_host_mode) {
1986 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
1987 num_ports = HCS_MAX_PORTS(reg);
1988 for (i = 0; i < num_ports; i++) {
1989 reg = dwc3_msm_read_reg(mdwc->base,
1990 USB3_PORTSC + i*0x10);
1991 if (reg & PORT_PE) {
1992 if (DEV_HIGHSPEED(reg) || DEV_FULLSPEED(reg))
1993 mdwc->hs_phy->flags |= PHY_HSFS_MODE;
1994 else if (DEV_LOWSPEED(reg))
1995 mdwc->hs_phy->flags |= PHY_LS_MODE;
1996 }
1997 }
1998 } else {
1999 if (dwc->gadget.speed == USB_SPEED_HIGH ||
2000 dwc->gadget.speed == USB_SPEED_FULL)
2001 mdwc->hs_phy->flags |= PHY_HSFS_MODE;
2002 else if (dwc->gadget.speed == USB_SPEED_LOW)
2003 mdwc->hs_phy->flags |= PHY_LS_MODE;
2004 }
2005}
2006
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05302007static void msm_dwc3_perf_vote_update(struct dwc3_msm *mdwc,
2008 bool perf_mode);
Mayank Rana511f3b22016-08-02 12:00:11 -07002009
/*
 * dwc3_msm_suspend - put the controller into low power mode (LPM).
 *
 * Refuses (-EBUSY) when device events are pending, when the OTG state
 * machine is mid-transition after a cable disconnect, or when a device
 * bus suspend is attempted while not in CONFIGURED state.  Otherwise it
 * suspends the PHYs, gates the clocks in the hardware-mandated order,
 * optionally power-collapses the controller, drops the bus vote and
 * arms the PHY wakeup interrupts.
 *
 * Returns 0 on success or a negative errno when LPM entry is aborted.
 */
static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
{
	int ret;
	bool can_suspend_ssphy;
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	struct dwc3_event_buffer *evt;

	if (atomic_read(&dwc->in_lpm)) {
		dev_dbg(mdwc->dev, "%s: Already suspended\n", __func__);
		return 0;
	}

	/* stop perf voting and drop to the low-perf settings */
	cancel_delayed_work_sync(&mdwc->perf_vote_work);
	msm_dwc3_perf_vote_update(mdwc, false);

	/* in device mode, bail out if events are still queued */
	if (!mdwc->in_host_mode) {
		evt = dwc->ev_buf;
		if ((evt->flags & DWC3_EVENT_PENDING)) {
			dev_dbg(mdwc->dev,
				"%s: %d device events pending, abort suspend\n",
				__func__, evt->count / 4);
			return -EBUSY;
		}
	}

	if (!mdwc->vbus_active && dwc->is_drd &&
		mdwc->otg_state == OTG_STATE_B_PERIPHERAL) {
		/*
		 * In some cases, the pm_runtime_suspend may be called by
		 * usb_bam when there is pending lpm flag. However, if this is
		 * done when cable was disconnected and otg state has not
		 * yet changed to IDLE, then it means OTG state machine
		 * is running and we race against it. So cancel LPM for now,
		 * and OTG state machine will go for LPM later, after completing
		 * transition to IDLE state.
		 */
		dev_dbg(mdwc->dev,
			"%s: cable disconnected while not in idle otg state\n",
			__func__);
		return -EBUSY;
	}

	/*
	 * Check if device is not in CONFIGURED state
	 * then check controller state of L2 and break
	 * LPM sequence. Check this for device bus suspend case.
	 */
	if ((dwc->is_drd && mdwc->otg_state == OTG_STATE_B_SUSPEND) &&
		(dwc->gadget.state != USB_STATE_CONFIGURED)) {
		pr_err("%s(): Trying to go in LPM with state:%d\n",
					__func__, dwc->gadget.state);
		pr_err("%s(): LPM is not performed.\n", __func__);
		return -EBUSY;
	}

	/* arm HS PHY auto-suspend and confirm L2 entry */
	ret = dwc3_msm_prepare_suspend(mdwc);
	if (ret)
		return ret;

	/* Initialize variables here */
	can_suspend_ssphy = !(mdwc->in_host_mode &&
				dwc3_msm_is_host_superspeed(mdwc));

	/* Disable core irq */
	if (dwc->irq)
		disable_irq(dwc->irq);

	/* disable power event irq, hs and ss phy irq is used as wake up src */
	disable_irq(mdwc->pwr_event_irq);

	dwc3_set_phy_speed_flags(mdwc);
	/* Suspend HS PHY */
	usb_phy_set_suspend(mdwc->hs_phy, 1);

	/* Suspend SS PHY unless a SS host session must keep it alive */
	if (dwc->maximum_speed == USB_SPEED_SUPER && can_suspend_ssphy) {
		/* indicate phy about SS mode */
		if (dwc3_msm_is_superspeed(mdwc))
			mdwc->ss_phy->flags |= DEVICE_IN_SS_MODE;
		usb_phy_set_suspend(mdwc->ss_phy, 1);
		mdwc->lpm_flags |= MDWC3_SS_PHY_SUSPEND;
	}

	/* make sure above writes are completed before turning off clocks */
	wmb();

	/* Disable clocks */
	if (mdwc->bus_aggr_clk)
		clk_disable_unprepare(mdwc->bus_aggr_clk);
	clk_disable_unprepare(mdwc->utmi_clk);

	/* Memory core: OFF, Memory periphery: OFF */
	if (!mdwc->in_host_mode && !mdwc->vbus_active) {
		clk_set_flags(mdwc->core_clk, CLKFLAG_NORETAIN_MEM);
		clk_set_flags(mdwc->core_clk, CLKFLAG_NORETAIN_PERIPH);
	}

	/* park core clock at the XO rate while idle */
	clk_set_rate(mdwc->core_clk, 19200000);
	clk_disable_unprepare(mdwc->core_clk);
	if (mdwc->noc_aggr_clk)
		clk_disable_unprepare(mdwc->noc_aggr_clk);
	/*
	 * Disable iface_clk only after core_clk as core_clk has FSM
	 * depedency on iface_clk. Hence iface_clk should be turned off
	 * after core_clk is turned off.
	 */
	clk_disable_unprepare(mdwc->iface_clk);
	/* USB PHY no more requires TCXO */
	clk_disable_unprepare(mdwc->xo_clk);

	/* Perform controller power collapse */
	if (!mdwc->in_host_mode && (!mdwc->vbus_active || mdwc->in_restart)) {
		mdwc->lpm_flags |= MDWC3_POWER_COLLAPSE;
		dev_dbg(mdwc->dev, "%s: power collapse\n", __func__);
		dwc3_msm_config_gdsc(mdwc, 0);
		clk_disable_unprepare(mdwc->sleep_clk);

		if (mdwc->iommu_map)
			arm_iommu_detach_device(mdwc->dev);
	}

	/* Remove bus voting */
	if (mdwc->bus_perf_client) {
		mdwc->bus_vote = 0;
		schedule_work(&mdwc->bus_vote_w);
	}

	/*
	 * release wakeup source with timeout to defer system suspend to
	 * handle case where on USB cable disconnect, SUSPEND and DISCONNECT
	 * event is received.
	 */
	if (mdwc->lpm_to_suspend_delay) {
		dev_dbg(mdwc->dev, "defer suspend with %d(msecs)\n",
					mdwc->lpm_to_suspend_delay);
		pm_wakeup_event(mdwc->dev, mdwc->lpm_to_suspend_delay);
	} else {
		pm_relax(mdwc->dev);
	}

	atomic_set(&dwc->in_lpm, 1);

	/*
	 * with DCP or during cable disconnect, we dont require wakeup
	 * using HS_PHY_IRQ or SS_PHY_IRQ. Hence enable wakeup only in
	 * case of host bus suspend and device bus suspend.
	 */
	if (mdwc->vbus_active || mdwc->in_host_mode) {
		enable_irq_wake(mdwc->hs_phy_irq);
		enable_irq(mdwc->hs_phy_irq);
		if (mdwc->ss_phy_irq) {
			enable_irq_wake(mdwc->ss_phy_irq);
			enable_irq(mdwc->ss_phy_irq);
		}
		mdwc->lpm_flags |= MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
	}

	dev_info(mdwc->dev, "DWC3 in low power mode\n");
	return 0;
}
2170
/*
 * dwc3_msm_resume - bring the controller out of low power mode.
 *
 * Reverses dwc3_msm_suspend(): restores the bus vote, re-enables the
 * clocks in the hardware-mandated order (iface before core), resumes
 * the PHYs, and, when the controller was power-collapsed, re-runs the
 * POR init sequence and reattaches the IOMMU mapping.  Finally the
 * wakeup IRQs armed at suspend are disarmed and pending power events
 * are drained.
 *
 * Returns 0 (errors from individual steps are logged, not propagated).
 */
static int dwc3_msm_resume(struct dwc3_msm *mdwc)
{
	int ret;
	long core_clk_rate;
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);

	dev_dbg(mdwc->dev, "%s: exiting lpm\n", __func__);

	if (!atomic_read(&dwc->in_lpm)) {
		dev_dbg(mdwc->dev, "%s: Already resumed\n", __func__);
		return 0;
	}

	pm_stay_awake(mdwc->dev);

	/* Enable bus voting */
	if (mdwc->bus_perf_client) {
		mdwc->bus_vote = 1;
		schedule_work(&mdwc->bus_vote_w);
	}

	/* Vote for TCXO while waking up USB HSPHY */
	ret = clk_prepare_enable(mdwc->xo_clk);
	if (ret)
		dev_err(mdwc->dev, "%s failed to vote TCXO buffer%d\n",
						__func__, ret);

	/* Restore controller power collapse */
	if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
		dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
		dwc3_msm_config_gdsc(mdwc, 1);
		ret = reset_control_assert(mdwc->core_reset);
		if (ret)
			dev_err(mdwc->dev, "%s:core_reset assert failed\n",
					__func__);
		/* HW requires a short delay for reset to take place properly */
		usleep_range(1000, 1200);
		ret = reset_control_deassert(mdwc->core_reset);
		if (ret)
			dev_err(mdwc->dev, "%s:core_reset deassert failed\n",
					__func__);
		clk_prepare_enable(mdwc->sleep_clk);
	}

	/*
	 * Enable clocks
	 * Turned ON iface_clk before core_clk due to FSM depedency.
	 */
	clk_prepare_enable(mdwc->iface_clk);
	if (mdwc->noc_aggr_clk)
		clk_prepare_enable(mdwc->noc_aggr_clk);

	/* HS-only host sessions can run the core at the lower HS rate */
	core_clk_rate = mdwc->core_clk_rate;
	if (mdwc->in_host_mode && mdwc->max_rh_port_speed == USB_SPEED_HIGH) {
		core_clk_rate = mdwc->core_clk_rate_hs;
		dev_dbg(mdwc->dev, "%s: set hs core clk rate %ld\n", __func__,
			core_clk_rate);
	}

	clk_set_rate(mdwc->core_clk, core_clk_rate);
	clk_prepare_enable(mdwc->core_clk);

	/* set Memory core: ON, Memory periphery: ON */
	clk_set_flags(mdwc->core_clk, CLKFLAG_RETAIN_MEM);
	clk_set_flags(mdwc->core_clk, CLKFLAG_RETAIN_PERIPH);

	clk_prepare_enable(mdwc->utmi_clk);
	if (mdwc->bus_aggr_clk)
		clk_prepare_enable(mdwc->bus_aggr_clk);

	/* Resume SS PHY */
	if (dwc->maximum_speed == USB_SPEED_SUPER &&
			mdwc->lpm_flags & MDWC3_SS_PHY_SUSPEND) {
		/* re-apply Type-C lane selection before unsuspending */
		mdwc->ss_phy->flags &= ~(PHY_LANE_A | PHY_LANE_B);
		if (mdwc->typec_orientation == ORIENTATION_CC1)
			mdwc->ss_phy->flags |= PHY_LANE_A;
		if (mdwc->typec_orientation == ORIENTATION_CC2)
			mdwc->ss_phy->flags |= PHY_LANE_B;
		usb_phy_set_suspend(mdwc->ss_phy, 0);
		mdwc->ss_phy->flags &= ~DEVICE_IN_SS_MODE;
		mdwc->lpm_flags &= ~MDWC3_SS_PHY_SUSPEND;
	}

	mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
	/* Resume HS PHY */
	usb_phy_set_suspend(mdwc->hs_phy, 0);

	/* Recover from controller power collapse */
	if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
		u32 tmp;

		dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);

		dwc3_msm_power_collapse_por(mdwc);

		/* Get initial P3 status and enable IN_P3 event */
		tmp = dwc3_msm_read_reg_field(mdwc->base,
			DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
		atomic_set(&mdwc->in_p3, tmp == DWC3_LINK_STATE_U3);
		dwc3_msm_write_reg_field(mdwc->base, PWR_EVNT_IRQ_MASK_REG,
					PWR_EVNT_POWERDOWN_IN_P3_MASK, 1);

		mdwc->lpm_flags &= ~MDWC3_POWER_COLLAPSE;

		if (mdwc->iommu_map) {
			ret = arm_iommu_attach_device(mdwc->dev,
					mdwc->iommu_map);
			if (ret)
				dev_err(mdwc->dev, "IOMMU attach failed (%d)\n",
						ret);
			else
				dev_dbg(mdwc->dev, "attached to IOMMU\n");
		}
	}

	atomic_set(&dwc->in_lpm, 0);

	/* enable power evt irq for IN P3 detection */
	enable_irq(mdwc->pwr_event_irq);

	/* Disable HSPHY auto suspend */
	dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
		dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0)) &
				~(DWC3_GUSB2PHYCFG_ENBLSLPM |
					DWC3_GUSB2PHYCFG_SUSPHY));

	/* Disable wakeup capable for HS_PHY IRQ & SS_PHY_IRQ if enabled */
	if (mdwc->lpm_flags & MDWC3_ASYNC_IRQ_WAKE_CAPABILITY) {
		disable_irq_wake(mdwc->hs_phy_irq);
		disable_irq_nosync(mdwc->hs_phy_irq);
		if (mdwc->ss_phy_irq) {
			disable_irq_wake(mdwc->ss_phy_irq);
			disable_irq_nosync(mdwc->ss_phy_irq);
		}
		mdwc->lpm_flags &= ~MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
	}

	dev_info(mdwc->dev, "DWC3 exited from low power mode\n");

	/* Enable core irq */
	if (dwc->irq)
		enable_irq(dwc->irq);

	/*
	 * Handle other power events that could not have been handled during
	 * Low Power Mode
	 */
	dwc3_pwr_event_handler(mdwc);

	/* restart periodic perf voting if a QoS request is active */
	if (pm_qos_request_active(&mdwc->pm_qos_req_dma))
		schedule_delayed_work(&mdwc->perf_vote_work,
			msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));

	dbg_event(0xFF, "Ctl Res", atomic_read(&dwc->in_lpm));
	return 0;
}
2327
2328/**
2329 * dwc3_ext_event_notify - callback to handle events from external transceiver
2330 *
2331 * Returns 0 on success
2332 */
2333static void dwc3_ext_event_notify(struct dwc3_msm *mdwc)
2334{
2335 /* Flush processing any pending events before handling new ones */
2336 flush_delayed_work(&mdwc->sm_work);
2337
2338 if (mdwc->id_state == DWC3_ID_FLOAT) {
2339 dev_dbg(mdwc->dev, "XCVR: ID set\n");
2340 set_bit(ID, &mdwc->inputs);
2341 } else {
2342 dev_dbg(mdwc->dev, "XCVR: ID clear\n");
2343 clear_bit(ID, &mdwc->inputs);
2344 }
2345
2346 if (mdwc->vbus_active && !mdwc->in_restart) {
2347 dev_dbg(mdwc->dev, "XCVR: BSV set\n");
2348 set_bit(B_SESS_VLD, &mdwc->inputs);
2349 } else {
2350 dev_dbg(mdwc->dev, "XCVR: BSV clear\n");
2351 clear_bit(B_SESS_VLD, &mdwc->inputs);
2352 }
2353
2354 if (mdwc->suspend) {
2355 dev_dbg(mdwc->dev, "XCVR: SUSP set\n");
2356 set_bit(B_SUSPEND, &mdwc->inputs);
2357 } else {
2358 dev_dbg(mdwc->dev, "XCVR: SUSP clear\n");
2359 clear_bit(B_SUSPEND, &mdwc->inputs);
2360 }
2361
2362 schedule_delayed_work(&mdwc->sm_work, 0);
2363}
2364
2365static void dwc3_resume_work(struct work_struct *w)
2366{
2367 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, resume_work);
Mayank Rana08e41922017-03-02 15:25:48 -08002368 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07002369
2370 dev_dbg(mdwc->dev, "%s: dwc3 resume work\n", __func__);
2371
2372 /*
2373 * exit LPM first to meet resume timeline from device side.
2374 * resume_pending flag would prevent calling
2375 * dwc3_msm_resume() in case we are here due to system
2376 * wide resume without usb cable connected. This flag is set
2377 * only in case of power event irq in lpm.
2378 */
2379 if (mdwc->resume_pending) {
2380 dwc3_msm_resume(mdwc);
2381 mdwc->resume_pending = false;
2382 }
2383
Mayank Rana08e41922017-03-02 15:25:48 -08002384 if (atomic_read(&mdwc->pm_suspended)) {
2385 dbg_event(0xFF, "RWrk PMSus", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07002386 /* let pm resume kick in resume work later */
2387 return;
Mayank Rana08e41922017-03-02 15:25:48 -08002388 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002389 dwc3_ext_event_notify(mdwc);
2390}
2391
2392static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc)
2393{
2394 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2395 u32 irq_stat, irq_clear = 0;
2396
2397 irq_stat = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
2398 dev_dbg(mdwc->dev, "%s irq_stat=%X\n", __func__, irq_stat);
2399
2400 /* Check for P3 events */
2401 if ((irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) &&
2402 (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK)) {
2403 /* Can't tell if entered or exit P3, so check LINKSTATE */
2404 u32 ls = dwc3_msm_read_reg_field(mdwc->base,
2405 DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
2406 dev_dbg(mdwc->dev, "%s link state = 0x%04x\n", __func__, ls);
2407 atomic_set(&mdwc->in_p3, ls == DWC3_LINK_STATE_U3);
2408
2409 irq_stat &= ~(PWR_EVNT_POWERDOWN_OUT_P3_MASK |
2410 PWR_EVNT_POWERDOWN_IN_P3_MASK);
2411 irq_clear |= (PWR_EVNT_POWERDOWN_OUT_P3_MASK |
2412 PWR_EVNT_POWERDOWN_IN_P3_MASK);
2413 } else if (irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) {
2414 atomic_set(&mdwc->in_p3, 0);
2415 irq_stat &= ~PWR_EVNT_POWERDOWN_OUT_P3_MASK;
2416 irq_clear |= PWR_EVNT_POWERDOWN_OUT_P3_MASK;
2417 } else if (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK) {
2418 atomic_set(&mdwc->in_p3, 1);
2419 irq_stat &= ~PWR_EVNT_POWERDOWN_IN_P3_MASK;
2420 irq_clear |= PWR_EVNT_POWERDOWN_IN_P3_MASK;
2421 }
2422
2423 /* Clear L2 exit */
2424 if (irq_stat & PWR_EVNT_LPM_OUT_L2_MASK) {
2425 irq_stat &= ~PWR_EVNT_LPM_OUT_L2_MASK;
2426 irq_stat |= PWR_EVNT_LPM_OUT_L2_MASK;
2427 }
2428
2429 /* Handle exit from L1 events */
2430 if (irq_stat & PWR_EVNT_LPM_OUT_L1_MASK) {
2431 dev_dbg(mdwc->dev, "%s: handling PWR_EVNT_LPM_OUT_L1_MASK\n",
2432 __func__);
2433 if (usb_gadget_wakeup(&dwc->gadget))
2434 dev_err(mdwc->dev, "%s failed to take dwc out of L1\n",
2435 __func__);
2436 irq_stat &= ~PWR_EVNT_LPM_OUT_L1_MASK;
2437 irq_clear |= PWR_EVNT_LPM_OUT_L1_MASK;
2438 }
2439
2440 /* Unhandled events */
2441 if (irq_stat)
2442 dev_dbg(mdwc->dev, "%s: unexpected PWR_EVNT, irq_stat=%X\n",
2443 __func__, irq_stat);
2444
2445 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG, irq_clear);
2446}
2447
2448static irqreturn_t msm_dwc3_pwr_irq_thread(int irq, void *_mdwc)
2449{
2450 struct dwc3_msm *mdwc = _mdwc;
2451 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2452
2453 dev_dbg(mdwc->dev, "%s\n", __func__);
2454
2455 if (atomic_read(&dwc->in_lpm))
2456 dwc3_resume_work(&mdwc->resume_work);
2457 else
2458 dwc3_pwr_event_handler(mdwc);
2459
Mayank Rana08e41922017-03-02 15:25:48 -08002460 dbg_event(0xFF, "PWR IRQ", atomic_read(&dwc->in_lpm));
Mayank Rana511f3b22016-08-02 12:00:11 -07002461 return IRQ_HANDLED;
2462}
2463
2464static irqreturn_t msm_dwc3_pwr_irq(int irq, void *data)
2465{
2466 struct dwc3_msm *mdwc = data;
2467 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2468
2469 dwc->t_pwr_evt_irq = ktime_get();
2470 dev_dbg(mdwc->dev, "%s received\n", __func__);
2471 /*
2472 * When in Low Power Mode, can't read PWR_EVNT_IRQ_STAT_REG to acertain
2473 * which interrupts have been triggered, as the clocks are disabled.
2474 * Resume controller by waking up pwr event irq thread.After re-enabling
2475 * clocks, dwc3_msm_resume will call dwc3_pwr_event_handler to handle
2476 * all other power events.
2477 */
2478 if (atomic_read(&dwc->in_lpm)) {
2479 /* set this to call dwc3_msm_resume() */
2480 mdwc->resume_pending = true;
2481 return IRQ_WAKE_THREAD;
2482 }
2483
2484 dwc3_pwr_event_handler(mdwc);
2485 return IRQ_HANDLED;
2486}
2487
2488static int dwc3_cpu_notifier_cb(struct notifier_block *nfb,
2489 unsigned long action, void *hcpu)
2490{
2491 uint32_t cpu = (uintptr_t)hcpu;
2492 struct dwc3_msm *mdwc =
2493 container_of(nfb, struct dwc3_msm, dwc3_cpu_notifier);
2494
2495 if (cpu == cpu_to_affin && action == CPU_ONLINE) {
2496 pr_debug("%s: cpu online:%u irq:%d\n", __func__,
2497 cpu_to_affin, mdwc->irq_to_affin);
2498 irq_set_affinity(mdwc->irq_to_affin, get_cpu_mask(cpu));
2499 }
2500
2501 return NOTIFY_OK;
2502}
2503
2504static void dwc3_otg_sm_work(struct work_struct *w);
2505
/*
 * dwc3_msm_get_clk_gdsc - acquire the GDSC regulator, resets and clocks.
 *
 * Looks up every clock the glue layer needs (xo, iface, core, sleep,
 * utmi, plus the optional bus_aggr/noc_aggr/cfg_ahb clocks), the core
 * reset line, and the DT-specified core clock rates.  Mandatory
 * resources return a negative errno on failure; optional ones are
 * silently set to NULL.
 */
static int dwc3_msm_get_clk_gdsc(struct dwc3_msm *mdwc)
{
	int ret;

	/* GDSC is optional; NULL means "no GDSC to manage" */
	mdwc->dwc3_gdsc = devm_regulator_get(mdwc->dev, "USB3_GDSC");
	if (IS_ERR(mdwc->dwc3_gdsc))
		mdwc->dwc3_gdsc = NULL;

	mdwc->xo_clk = devm_clk_get(mdwc->dev, "xo");
	if (IS_ERR(mdwc->xo_clk)) {
		dev_err(mdwc->dev, "%s unable to get TCXO buffer handle\n",
								__func__);
		ret = PTR_ERR(mdwc->xo_clk);
		return ret;
	}
	clk_set_rate(mdwc->xo_clk, 19200000);

	mdwc->iface_clk = devm_clk_get(mdwc->dev, "iface_clk");
	if (IS_ERR(mdwc->iface_clk)) {
		dev_err(mdwc->dev, "failed to get iface_clk\n");
		ret = PTR_ERR(mdwc->iface_clk);
		return ret;
	}

	/*
	 * DWC3 Core requires its CORE CLK (aka master / bus clk) to
	 * run at 125Mhz in SSUSB mode and >60MHZ for HSUSB mode.
	 * On newer platform it can run at 150MHz as well.
	 */
	mdwc->core_clk = devm_clk_get(mdwc->dev, "core_clk");
	if (IS_ERR(mdwc->core_clk)) {
		dev_err(mdwc->dev, "failed to get core_clk\n");
		ret = PTR_ERR(mdwc->core_clk);
		return ret;
	}

	mdwc->core_reset = devm_reset_control_get(mdwc->dev, "core_reset");
	if (IS_ERR(mdwc->core_reset)) {
		dev_err(mdwc->dev, "failed to get core_reset\n");
		return PTR_ERR(mdwc->core_reset);
	}

	/* the DT core clock rate is mandatory */
	if (of_property_read_u32(mdwc->dev->of_node, "qcom,core-clk-rate",
				(u32 *)&mdwc->core_clk_rate)) {
		dev_err(mdwc->dev, "USB core-clk-rate is not present\n");
		return -EINVAL;
	}

	/* snap the requested rate to what the clock tree can provide */
	mdwc->core_clk_rate = clk_round_rate(mdwc->core_clk,
							mdwc->core_clk_rate);
	dev_dbg(mdwc->dev, "USB core frequency = %ld\n",
						mdwc->core_clk_rate);
	ret = clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
	if (ret)
		dev_err(mdwc->dev, "fail to set core_clk freq:%d\n", ret);

	/* optional lower rate for HS-only sessions; default to full rate */
	if (of_property_read_u32(mdwc->dev->of_node, "qcom,core-clk-rate-hs",
				(u32 *)&mdwc->core_clk_rate_hs)) {
		dev_dbg(mdwc->dev, "USB core-clk-rate-hs is not present\n");
		mdwc->core_clk_rate_hs = mdwc->core_clk_rate;
	}

	mdwc->sleep_clk = devm_clk_get(mdwc->dev, "sleep_clk");
	if (IS_ERR(mdwc->sleep_clk)) {
		dev_err(mdwc->dev, "failed to get sleep_clk\n");
		ret = PTR_ERR(mdwc->sleep_clk);
		return ret;
	}

	clk_set_rate(mdwc->sleep_clk, 32000);
	mdwc->utmi_clk_rate = 19200000;
	mdwc->utmi_clk = devm_clk_get(mdwc->dev, "utmi_clk");
	if (IS_ERR(mdwc->utmi_clk)) {
		dev_err(mdwc->dev, "failed to get utmi_clk\n");
		ret = PTR_ERR(mdwc->utmi_clk);
		return ret;
	}

	clk_set_rate(mdwc->utmi_clk, mdwc->utmi_clk_rate);
	/* bus_aggr and noc_aggr clocks are optional */
	mdwc->bus_aggr_clk = devm_clk_get(mdwc->dev, "bus_aggr_clk");
	if (IS_ERR(mdwc->bus_aggr_clk))
		mdwc->bus_aggr_clk = NULL;

	mdwc->noc_aggr_clk = devm_clk_get(mdwc->dev, "noc_aggr_clk");
	if (IS_ERR(mdwc->noc_aggr_clk))
		mdwc->noc_aggr_clk = NULL;

	/* cfg_ahb_clk is required only when declared in clock-names */
	if (of_property_match_string(mdwc->dev->of_node,
				"clock-names", "cfg_ahb_clk") >= 0) {
		mdwc->cfg_ahb_clk = devm_clk_get(mdwc->dev, "cfg_ahb_clk");
		if (IS_ERR(mdwc->cfg_ahb_clk)) {
			ret = PTR_ERR(mdwc->cfg_ahb_clk);
			mdwc->cfg_ahb_clk = NULL;
			if (ret != -EPROBE_DEFER)
				dev_err(mdwc->dev,
					"failed to get cfg_ahb_clk ret %d\n",
					ret);
			return ret;
		}
	}

	return 0;
}
2609
2610static int dwc3_msm_id_notifier(struct notifier_block *nb,
2611 unsigned long event, void *ptr)
2612{
2613 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, id_nb);
Hemant Kumarde1df692016-04-26 19:36:48 -07002614 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07002615 struct extcon_dev *edev = ptr;
2616 enum dwc3_id_state id;
2617 int cc_state;
Hemant Kumarde1df692016-04-26 19:36:48 -07002618 int speed;
Mayank Rana511f3b22016-08-02 12:00:11 -07002619
2620 if (!edev) {
2621 dev_err(mdwc->dev, "%s: edev null\n", __func__);
2622 goto done;
2623 }
2624
2625 id = event ? DWC3_ID_GROUND : DWC3_ID_FLOAT;
2626
2627 dev_dbg(mdwc->dev, "host:%ld (id:%d) event received\n", event, id);
2628
2629 cc_state = extcon_get_cable_state_(edev, EXTCON_USB_CC);
2630 if (cc_state < 0)
2631 mdwc->typec_orientation = ORIENTATION_NONE;
2632 else
2633 mdwc->typec_orientation =
2634 cc_state ? ORIENTATION_CC2 : ORIENTATION_CC1;
2635
Mayank Rana08e41922017-03-02 15:25:48 -08002636 dbg_event(0xFF, "cc_state", mdwc->typec_orientation);
Hemant Kumarde1df692016-04-26 19:36:48 -07002637
2638 speed = extcon_get_cable_state_(edev, EXTCON_USB_SPEED);
Vijayavardhan Vennapusa1965ae982017-01-16 13:35:17 +05302639 /* Use default dwc->maximum_speed if extcon doesn't report speed. */
2640 if (speed >= 0)
2641 dwc->maximum_speed =
2642 (speed == 0) ? USB_SPEED_HIGH : USB_SPEED_SUPER;
2643
Vamsi Krishna Samavedam86ed20b2017-01-31 13:55:38 -08002644 if (dwc->maximum_speed > dwc->max_hw_supp_speed)
2645 dwc->maximum_speed = dwc->max_hw_supp_speed;
Hemant Kumarde1df692016-04-26 19:36:48 -07002646
Mayank Rana511f3b22016-08-02 12:00:11 -07002647 if (mdwc->id_state != id) {
2648 mdwc->id_state = id;
Mayank Rana08e41922017-03-02 15:25:48 -08002649 dbg_event(0xFF, "id_state", mdwc->id_state);
Mayank Rana511f3b22016-08-02 12:00:11 -07002650 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
2651 }
2652
2653done:
2654 return NOTIFY_DONE;
2655}
2656
2657static int dwc3_msm_vbus_notifier(struct notifier_block *nb,
2658 unsigned long event, void *ptr)
2659{
2660 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, vbus_nb);
2661 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2662 struct extcon_dev *edev = ptr;
2663 int cc_state;
Hemant Kumarde1df692016-04-26 19:36:48 -07002664 int speed;
Mayank Rana511f3b22016-08-02 12:00:11 -07002665
2666 if (!edev) {
2667 dev_err(mdwc->dev, "%s: edev null\n", __func__);
2668 goto done;
2669 }
2670
2671 dev_dbg(mdwc->dev, "vbus:%ld event received\n", event);
2672
2673 if (mdwc->vbus_active == event)
2674 return NOTIFY_DONE;
2675
2676 cc_state = extcon_get_cable_state_(edev, EXTCON_USB_CC);
2677 if (cc_state < 0)
2678 mdwc->typec_orientation = ORIENTATION_NONE;
2679 else
2680 mdwc->typec_orientation =
2681 cc_state ? ORIENTATION_CC2 : ORIENTATION_CC1;
2682
Mayank Rana08e41922017-03-02 15:25:48 -08002683 dbg_event(0xFF, "cc_state", mdwc->typec_orientation);
Hemant Kumarde1df692016-04-26 19:36:48 -07002684
2685 speed = extcon_get_cable_state_(edev, EXTCON_USB_SPEED);
Vijayavardhan Vennapusa1965ae982017-01-16 13:35:17 +05302686 /* Use default dwc->maximum_speed if extcon doesn't report speed. */
2687 if (speed >= 0)
2688 dwc->maximum_speed =
2689 (speed == 0) ? USB_SPEED_HIGH : USB_SPEED_SUPER;
2690
Vamsi Krishna Samavedam86ed20b2017-01-31 13:55:38 -08002691 if (dwc->maximum_speed > dwc->max_hw_supp_speed)
2692 dwc->maximum_speed = dwc->max_hw_supp_speed;
Hemant Kumarde1df692016-04-26 19:36:48 -07002693
Mayank Rana511f3b22016-08-02 12:00:11 -07002694 mdwc->vbus_active = event;
Mayank Rana83ad5822016-08-09 14:17:22 -07002695 if (dwc->is_drd && !mdwc->in_restart)
Mayank Rana511f3b22016-08-02 12:00:11 -07002696 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07002697done:
2698 return NOTIFY_DONE;
2699}
Mayank Rana51958172017-02-28 14:49:21 -08002700/*
2701 * Handle EUD based soft detach/attach event, and force USB high speed mode
2702 * functionality on receiving soft attach event.
2703 *
2704 * @nb - notifier handler
2705 * @event - event information i.e. soft detach/attach event
2706 * @ptr - extcon_dev pointer
2707 *
2708 * @return int - NOTIFY_DONE always due to EUD
2709 */
2710static int dwc3_msm_eud_notifier(struct notifier_block *nb,
2711 unsigned long event, void *ptr)
2712{
2713 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, eud_event_nb);
2714 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2715 struct extcon_dev *edev = ptr;
2716
2717 if (!edev) {
2718 dev_err(mdwc->dev, "%s: edev null\n", __func__);
2719 goto done;
2720 }
2721
2722 dbg_event(0xFF, "EUD_NB", event);
2723 dev_dbg(mdwc->dev, "eud:%ld event received\n", event);
2724 if (mdwc->vbus_active == event)
2725 return NOTIFY_DONE;
2726
2727 /* Force USB High-Speed enumeration Only */
2728 dwc->maximum_speed = USB_SPEED_HIGH;
2729 dbg_event(0xFF, "Speed", dwc->maximum_speed);
2730 mdwc->vbus_active = event;
2731 if (dwc->is_drd && !mdwc->in_restart)
2732 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
2733done:
2734 return NOTIFY_DONE;
2735}
Mayank Rana511f3b22016-08-02 12:00:11 -07002736
/*
 * dwc3_msm_extcon_register - wire up extcon notifiers for VBUS, ID and EUD.
 *
 * Parses the "extcon" DT property: phandle 0 (mandatory) reports VBUS,
 * phandle 1 (optional) reports USB-HOST/ID, phandle 2 (optional)
 * reports EUD soft attach/detach.  Registered notifiers are torn down
 * on any later failure.  Returns 0 on success or a negative errno.
 */
static int dwc3_msm_extcon_register(struct dwc3_msm *mdwc)
{
	struct device_node *node = mdwc->dev->of_node;
	struct extcon_dev *edev;
	int ret = 0;

	if (!of_property_read_bool(node, "extcon"))
		return 0;

	/* Use first phandle (mandatory) for USB vbus status notification */
	edev = extcon_get_edev_by_phandle(mdwc->dev, 0);
	if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV)
		return PTR_ERR(edev);

	if (!IS_ERR(edev)) {
		mdwc->extcon_vbus = edev;
		mdwc->vbus_nb.notifier_call = dwc3_msm_vbus_notifier;
		ret = extcon_register_notifier(edev, EXTCON_USB,
				&mdwc->vbus_nb);
		if (ret < 0) {
			dev_err(mdwc->dev, "failed to register notifier for USB\n");
			return ret;
		}
	}

	/* Use second phandle (optional) for USB ID status notification */
	if (of_count_phandle_with_args(node, "extcon", NULL) > 1) {
		edev = extcon_get_edev_by_phandle(mdwc->dev, 1);
		if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) {
			ret = PTR_ERR(edev);
			goto err;
		}
	}

	/*
	 * NOTE(review): when fewer than two phandles exist, edev still
	 * holds the phandle-0 (VBUS) device here, so the ID notifier is
	 * registered on that same extcon device.  Presumably intentional
	 * (a single extcon can report both USB and USB-HOST cables) —
	 * confirm against the supported DT configurations.
	 */
	if (!IS_ERR(edev)) {
		mdwc->extcon_id = edev;
		mdwc->id_nb.notifier_call = dwc3_msm_id_notifier;
		ret = extcon_register_notifier(edev, EXTCON_USB_HOST,
				&mdwc->id_nb);
		if (ret < 0) {
			dev_err(mdwc->dev, "failed to register notifier for USB-HOST\n");
			goto err;
		}
	}

	/* Use third phandle (optional) for EUD based detach/attach events */
	if (of_count_phandle_with_args(node, "extcon", NULL) > 2) {
		edev = extcon_get_edev_by_phandle(mdwc->dev, 2);
		if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) {
			ret = PTR_ERR(edev);
			goto err;
		}
	}

	/*
	 * NOTE(review): same fall-through as above — with fewer than
	 * three phandles this registers the EUD notifier on the last
	 * successfully fetched edev.  TODO confirm this is the intended
	 * behavior for one- and two-phandle configurations.
	 */
	if (!IS_ERR(edev)) {
		mdwc->extcon_eud = edev;
		mdwc->eud_event_nb.notifier_call = dwc3_msm_eud_notifier;
		ret = extcon_register_notifier(edev, EXTCON_USB,
				&mdwc->eud_event_nb);
		if (ret < 0) {
			dev_err(mdwc->dev, "failed to register notifier for EUD-USB\n");
			goto err1;
		}
	}

	return 0;
err1:
	/* unwind the ID notifier registered above */
	if (mdwc->extcon_id)
		extcon_unregister_notifier(mdwc->extcon_id, EXTCON_USB_HOST,
				&mdwc->id_nb);
err:
	/* unwind the VBUS notifier registered above */
	if (mdwc->extcon_vbus)
		extcon_unregister_notifier(mdwc->extcon_vbus, EXTCON_USB,
				&mdwc->vbus_nb);
	return ret;
}
2813
Jack Phambbe27962017-03-23 18:42:26 -07002814#define SMMU_BASE 0x10000000 /* Device address range base */
2815#define SMMU_SIZE 0x40000000 /* Device address range size */
2816
2817static int dwc3_msm_init_iommu(struct dwc3_msm *mdwc)
2818{
2819 struct device_node *node = mdwc->dev->of_node;
2820 int atomic_ctx = 1;
2821 int ret;
2822
2823 if (!of_property_read_bool(node, "iommus"))
2824 return 0;
2825
2826 mdwc->iommu_map = arm_iommu_create_mapping(&platform_bus_type,
2827 SMMU_BASE, SMMU_SIZE);
2828 if (IS_ERR_OR_NULL(mdwc->iommu_map)) {
2829 ret = PTR_ERR(mdwc->iommu_map) ?: -ENODEV;
2830 dev_err(mdwc->dev, "Failed to create IOMMU mapping (%d)\n",
2831 ret);
2832 return ret;
2833 }
2834 dev_dbg(mdwc->dev, "IOMMU mapping created: %pK\n", mdwc->iommu_map);
2835
2836 ret = iommu_domain_set_attr(mdwc->iommu_map->domain, DOMAIN_ATTR_ATOMIC,
2837 &atomic_ctx);
2838 if (ret) {
2839 dev_err(mdwc->dev, "IOMMU set atomic attribute failed (%d)\n",
2840 ret);
2841 arm_iommu_release_mapping(mdwc->iommu_map);
2842 mdwc->iommu_map = NULL;
2843 return ret;
2844 }
2845
2846 return 0;
2847}
2848
Mayank Rana511f3b22016-08-02 12:00:11 -07002849static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
2850 char *buf)
2851{
2852 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
2853
2854 if (mdwc->vbus_active)
2855 return snprintf(buf, PAGE_SIZE, "peripheral\n");
2856 if (mdwc->id_state == DWC3_ID_GROUND)
2857 return snprintf(buf, PAGE_SIZE, "host\n");
2858
2859 return snprintf(buf, PAGE_SIZE, "none\n");
2860}
2861
2862static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
2863 const char *buf, size_t count)
2864{
2865 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
2866
2867 if (sysfs_streq(buf, "peripheral")) {
2868 mdwc->vbus_active = true;
2869 mdwc->id_state = DWC3_ID_FLOAT;
2870 } else if (sysfs_streq(buf, "host")) {
2871 mdwc->vbus_active = false;
2872 mdwc->id_state = DWC3_ID_GROUND;
2873 } else {
2874 mdwc->vbus_active = false;
2875 mdwc->id_state = DWC3_ID_FLOAT;
2876 }
2877
2878 dwc3_ext_event_notify(mdwc);
2879
2880 return count;
2881}
2882
static DEVICE_ATTR_RW(mode);
/* Forward declaration: the perf-vote worker is defined after probe uses it */
static void msm_dwc3_perf_vote_work(struct work_struct *w);
Mayank Rana511f3b22016-08-02 12:00:11 -07002885
Vamsi Krishna Samavedam17f26db2017-01-31 17:21:23 -08002886static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
2887 char *buf)
2888{
2889 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
2890 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2891
2892 return snprintf(buf, PAGE_SIZE, "%s\n",
2893 usb_speed_string(dwc->max_hw_supp_speed));
2894}
2895
2896static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
2897 const char *buf, size_t count)
2898{
2899 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
2900 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2901 enum usb_device_speed req_speed = USB_SPEED_UNKNOWN;
2902
2903 if (sysfs_streq(buf, "high"))
2904 req_speed = USB_SPEED_HIGH;
2905 else if (sysfs_streq(buf, "super"))
2906 req_speed = USB_SPEED_SUPER;
2907
2908 if (req_speed != USB_SPEED_UNKNOWN &&
2909 req_speed != dwc->max_hw_supp_speed) {
2910 dwc->maximum_speed = dwc->max_hw_supp_speed = req_speed;
2911 schedule_work(&mdwc->restart_usb_work);
2912 }
2913
2914 return count;
2915}
2916static DEVICE_ATTR_RW(speed);
2917
Mayank Rana511f3b22016-08-02 12:00:11 -07002918static int dwc3_msm_probe(struct platform_device *pdev)
2919{
2920 struct device_node *node = pdev->dev.of_node, *dwc3_node;
2921 struct device *dev = &pdev->dev;
Hemant Kumar8220a982017-01-19 18:11:34 -08002922 union power_supply_propval pval = {0};
Mayank Rana511f3b22016-08-02 12:00:11 -07002923 struct dwc3_msm *mdwc;
2924 struct dwc3 *dwc;
2925 struct resource *res;
2926 void __iomem *tcsr;
2927 bool host_mode;
2928 int ret = 0;
2929 int ext_hub_reset_gpio;
2930 u32 val;
2931
2932 mdwc = devm_kzalloc(&pdev->dev, sizeof(*mdwc), GFP_KERNEL);
2933 if (!mdwc)
2934 return -ENOMEM;
2935
2936 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
2937 dev_err(&pdev->dev, "setting DMA mask to 64 failed.\n");
2938 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
2939 dev_err(&pdev->dev, "setting DMA mask to 32 failed.\n");
2940 return -EOPNOTSUPP;
2941 }
2942 }
2943
2944 platform_set_drvdata(pdev, mdwc);
2945 mdwc->dev = &pdev->dev;
2946
2947 INIT_LIST_HEAD(&mdwc->req_complete_list);
2948 INIT_WORK(&mdwc->resume_work, dwc3_resume_work);
2949 INIT_WORK(&mdwc->restart_usb_work, dwc3_restart_usb_work);
2950 INIT_WORK(&mdwc->bus_vote_w, dwc3_msm_bus_vote_w);
Jack Pham4b8b4ae2016-08-09 11:36:34 -07002951 INIT_WORK(&mdwc->vbus_draw_work, dwc3_msm_vbus_draw_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07002952 INIT_DELAYED_WORK(&mdwc->sm_work, dwc3_otg_sm_work);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05302953 INIT_DELAYED_WORK(&mdwc->perf_vote_work, msm_dwc3_perf_vote_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07002954
2955 mdwc->dwc3_wq = alloc_ordered_workqueue("dwc3_wq", 0);
2956 if (!mdwc->dwc3_wq) {
2957 pr_err("%s: Unable to create workqueue dwc3_wq\n", __func__);
2958 return -ENOMEM;
2959 }
2960
2961 /* Get all clks and gdsc reference */
2962 ret = dwc3_msm_get_clk_gdsc(mdwc);
2963 if (ret) {
2964 dev_err(&pdev->dev, "error getting clock or gdsc.\n");
2965 return ret;
2966 }
2967
2968 mdwc->id_state = DWC3_ID_FLOAT;
2969 set_bit(ID, &mdwc->inputs);
2970
2971 mdwc->charging_disabled = of_property_read_bool(node,
2972 "qcom,charging-disabled");
2973
2974 ret = of_property_read_u32(node, "qcom,lpm-to-suspend-delay-ms",
2975 &mdwc->lpm_to_suspend_delay);
2976 if (ret) {
2977 dev_dbg(&pdev->dev, "setting lpm_to_suspend_delay to zero.\n");
2978 mdwc->lpm_to_suspend_delay = 0;
2979 }
2980
2981 /*
2982 * DWC3 has separate IRQ line for OTG events (ID/BSV) and for
2983 * DP and DM linestate transitions during low power mode.
2984 */
2985 mdwc->hs_phy_irq = platform_get_irq_byname(pdev, "hs_phy_irq");
2986 if (mdwc->hs_phy_irq < 0) {
2987 dev_err(&pdev->dev, "pget_irq for hs_phy_irq failed\n");
2988 ret = -EINVAL;
2989 goto err;
2990 } else {
2991 irq_set_status_flags(mdwc->hs_phy_irq, IRQ_NOAUTOEN);
2992 ret = devm_request_threaded_irq(&pdev->dev, mdwc->hs_phy_irq,
2993 msm_dwc3_pwr_irq,
2994 msm_dwc3_pwr_irq_thread,
2995 IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME
2996 | IRQF_ONESHOT, "hs_phy_irq", mdwc);
2997 if (ret) {
2998 dev_err(&pdev->dev, "irqreq hs_phy_irq failed: %d\n",
2999 ret);
3000 goto err;
3001 }
3002 }
3003
3004 mdwc->ss_phy_irq = platform_get_irq_byname(pdev, "ss_phy_irq");
3005 if (mdwc->ss_phy_irq < 0) {
3006 dev_dbg(&pdev->dev, "pget_irq for ss_phy_irq failed\n");
3007 } else {
3008 irq_set_status_flags(mdwc->ss_phy_irq, IRQ_NOAUTOEN);
3009 ret = devm_request_threaded_irq(&pdev->dev, mdwc->ss_phy_irq,
3010 msm_dwc3_pwr_irq,
3011 msm_dwc3_pwr_irq_thread,
3012 IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME
3013 | IRQF_ONESHOT, "ss_phy_irq", mdwc);
3014 if (ret) {
3015 dev_err(&pdev->dev, "irqreq ss_phy_irq failed: %d\n",
3016 ret);
3017 goto err;
3018 }
3019 }
3020
3021 mdwc->pwr_event_irq = platform_get_irq_byname(pdev, "pwr_event_irq");
3022 if (mdwc->pwr_event_irq < 0) {
3023 dev_err(&pdev->dev, "pget_irq for pwr_event_irq failed\n");
3024 ret = -EINVAL;
3025 goto err;
3026 } else {
3027 /* will be enabled in dwc3_msm_resume() */
3028 irq_set_status_flags(mdwc->pwr_event_irq, IRQ_NOAUTOEN);
3029 ret = devm_request_threaded_irq(&pdev->dev, mdwc->pwr_event_irq,
3030 msm_dwc3_pwr_irq,
3031 msm_dwc3_pwr_irq_thread,
3032 IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME,
3033 "msm_dwc3", mdwc);
3034 if (ret) {
3035 dev_err(&pdev->dev, "irqreq pwr_event_irq failed: %d\n",
3036 ret);
3037 goto err;
3038 }
3039 }
3040
3041 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcsr_base");
3042 if (!res) {
3043 dev_dbg(&pdev->dev, "missing TCSR memory resource\n");
3044 } else {
3045 tcsr = devm_ioremap_nocache(&pdev->dev, res->start,
3046 resource_size(res));
3047 if (IS_ERR_OR_NULL(tcsr)) {
3048 dev_dbg(&pdev->dev, "tcsr ioremap failed\n");
3049 } else {
3050 /* Enable USB3 on the primary USB port. */
3051 writel_relaxed(0x1, tcsr);
3052 /*
3053 * Ensure that TCSR write is completed before
3054 * USB registers initialization.
3055 */
3056 mb();
3057 }
3058 }
3059
3060 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core_base");
3061 if (!res) {
3062 dev_err(&pdev->dev, "missing memory base resource\n");
3063 ret = -ENODEV;
3064 goto err;
3065 }
3066
3067 mdwc->base = devm_ioremap_nocache(&pdev->dev, res->start,
3068 resource_size(res));
3069 if (!mdwc->base) {
3070 dev_err(&pdev->dev, "ioremap failed\n");
3071 ret = -ENODEV;
3072 goto err;
3073 }
3074
3075 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3076 "ahb2phy_base");
3077 if (res) {
3078 mdwc->ahb2phy_base = devm_ioremap_nocache(&pdev->dev,
3079 res->start, resource_size(res));
3080 if (IS_ERR_OR_NULL(mdwc->ahb2phy_base)) {
3081 dev_err(dev, "couldn't find ahb2phy_base addr.\n");
3082 mdwc->ahb2phy_base = NULL;
3083 } else {
3084 /*
3085 * On some targets cfg_ahb_clk depends upon usb gdsc
3086 * regulator. If cfg_ahb_clk is enabled without
3087 * turning on usb gdsc regulator clk is stuck off.
3088 */
3089 dwc3_msm_config_gdsc(mdwc, 1);
3090 clk_prepare_enable(mdwc->cfg_ahb_clk);
3091 /* Configure AHB2PHY for one wait state read/write*/
3092 val = readl_relaxed(mdwc->ahb2phy_base +
3093 PERIPH_SS_AHB2PHY_TOP_CFG);
3094 if (val != ONE_READ_WRITE_WAIT) {
3095 writel_relaxed(ONE_READ_WRITE_WAIT,
3096 mdwc->ahb2phy_base +
3097 PERIPH_SS_AHB2PHY_TOP_CFG);
3098 /* complete above write before using USB PHY */
3099 mb();
3100 }
3101 clk_disable_unprepare(mdwc->cfg_ahb_clk);
3102 dwc3_msm_config_gdsc(mdwc, 0);
3103 }
3104 }
3105
3106 if (of_get_property(pdev->dev.of_node, "qcom,usb-dbm", NULL)) {
3107 mdwc->dbm = usb_get_dbm_by_phandle(&pdev->dev, "qcom,usb-dbm");
3108 if (IS_ERR(mdwc->dbm)) {
3109 dev_err(&pdev->dev, "unable to get dbm device\n");
3110 ret = -EPROBE_DEFER;
3111 goto err;
3112 }
3113 /*
3114 * Add power event if the dbm indicates coming out of L1
3115 * by interrupt
3116 */
3117 if (dbm_l1_lpm_interrupt(mdwc->dbm)) {
3118 if (!mdwc->pwr_event_irq) {
3119 dev_err(&pdev->dev,
3120 "need pwr_event_irq exiting L1\n");
3121 ret = -EINVAL;
3122 goto err;
3123 }
3124 }
3125 }
3126
3127 ext_hub_reset_gpio = of_get_named_gpio(node,
3128 "qcom,ext-hub-reset-gpio", 0);
3129
3130 if (gpio_is_valid(ext_hub_reset_gpio)
3131 && (!devm_gpio_request(&pdev->dev, ext_hub_reset_gpio,
3132 "qcom,ext-hub-reset-gpio"))) {
3133 /* reset external hub */
3134 gpio_direction_output(ext_hub_reset_gpio, 1);
3135 /*
3136 * Hub reset should be asserted for minimum 5microsec
3137 * before deasserting.
3138 */
3139 usleep_range(5, 1000);
3140 gpio_direction_output(ext_hub_reset_gpio, 0);
3141 }
3142
3143 if (of_property_read_u32(node, "qcom,dwc-usb3-msm-tx-fifo-size",
3144 &mdwc->tx_fifo_size))
3145 dev_err(&pdev->dev,
3146 "unable to read platform data tx fifo size\n");
3147
3148 mdwc->disable_host_mode_pm = of_property_read_bool(node,
3149 "qcom,disable-host-mode-pm");
3150
3151 dwc3_set_notifier(&dwc3_msm_notify_event);
3152
Jack Phambbe27962017-03-23 18:42:26 -07003153 ret = dwc3_msm_init_iommu(mdwc);
3154 if (ret)
3155 goto err;
3156
Mayank Rana511f3b22016-08-02 12:00:11 -07003157 /* Assumes dwc3 is the first DT child of dwc3-msm */
3158 dwc3_node = of_get_next_available_child(node, NULL);
3159 if (!dwc3_node) {
3160 dev_err(&pdev->dev, "failed to find dwc3 child\n");
3161 ret = -ENODEV;
Jack Phambbe27962017-03-23 18:42:26 -07003162 goto uninit_iommu;
Mayank Rana511f3b22016-08-02 12:00:11 -07003163 }
3164
3165 ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
3166 if (ret) {
3167 dev_err(&pdev->dev,
3168 "failed to add create dwc3 core\n");
3169 of_node_put(dwc3_node);
Jack Phambbe27962017-03-23 18:42:26 -07003170 goto uninit_iommu;
Mayank Rana511f3b22016-08-02 12:00:11 -07003171 }
3172
3173 mdwc->dwc3 = of_find_device_by_node(dwc3_node);
3174 of_node_put(dwc3_node);
3175 if (!mdwc->dwc3) {
3176 dev_err(&pdev->dev, "failed to get dwc3 platform device\n");
3177 goto put_dwc3;
3178 }
3179
3180 mdwc->hs_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
3181 "usb-phy", 0);
3182 if (IS_ERR(mdwc->hs_phy)) {
3183 dev_err(&pdev->dev, "unable to get hsphy device\n");
3184 ret = PTR_ERR(mdwc->hs_phy);
3185 goto put_dwc3;
3186 }
3187 mdwc->ss_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
3188 "usb-phy", 1);
3189 if (IS_ERR(mdwc->ss_phy)) {
3190 dev_err(&pdev->dev, "unable to get ssphy device\n");
3191 ret = PTR_ERR(mdwc->ss_phy);
3192 goto put_dwc3;
3193 }
3194
3195 mdwc->bus_scale_table = msm_bus_cl_get_pdata(pdev);
3196 if (mdwc->bus_scale_table) {
3197 mdwc->bus_perf_client =
3198 msm_bus_scale_register_client(mdwc->bus_scale_table);
3199 }
3200
3201 dwc = platform_get_drvdata(mdwc->dwc3);
3202 if (!dwc) {
3203 dev_err(&pdev->dev, "Failed to get dwc3 device\n");
3204 goto put_dwc3;
3205 }
3206
3207 mdwc->irq_to_affin = platform_get_irq(mdwc->dwc3, 0);
3208 mdwc->dwc3_cpu_notifier.notifier_call = dwc3_cpu_notifier_cb;
3209
3210 if (cpu_to_affin)
3211 register_cpu_notifier(&mdwc->dwc3_cpu_notifier);
3212
Mayank Ranaf4918d32016-12-15 13:35:55 -08003213 ret = of_property_read_u32(node, "qcom,num-gsi-evt-buffs",
3214 &mdwc->num_gsi_event_buffers);
3215
Mayank Rana511f3b22016-08-02 12:00:11 -07003216 /*
3217 * Clocks and regulators will not be turned on until the first time
3218 * runtime PM resume is called. This is to allow for booting up with
3219 * charger already connected so as not to disturb PHY line states.
3220 */
3221 mdwc->lpm_flags = MDWC3_POWER_COLLAPSE | MDWC3_SS_PHY_SUSPEND;
3222 atomic_set(&dwc->in_lpm, 1);
Mayank Rana511f3b22016-08-02 12:00:11 -07003223 pm_runtime_set_autosuspend_delay(mdwc->dev, 1000);
3224 pm_runtime_use_autosuspend(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003225 device_init_wakeup(mdwc->dev, 1);
3226
3227 if (of_property_read_bool(node, "qcom,disable-dev-mode-pm"))
3228 pm_runtime_get_noresume(mdwc->dev);
3229
3230 ret = dwc3_msm_extcon_register(mdwc);
3231 if (ret)
3232 goto put_dwc3;
3233
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303234 ret = of_property_read_u32(node, "qcom,pm-qos-latency",
3235 &mdwc->pm_qos_latency);
3236 if (ret) {
3237 dev_dbg(&pdev->dev, "setting pm-qos-latency to zero.\n");
3238 mdwc->pm_qos_latency = 0;
3239 }
3240
Hemant Kumar8220a982017-01-19 18:11:34 -08003241 mdwc->usb_psy = power_supply_get_by_name("usb");
3242 if (!mdwc->usb_psy) {
3243 dev_warn(mdwc->dev, "Could not get usb power_supply\n");
3244 pval.intval = -EINVAL;
3245 } else {
3246 power_supply_get_property(mdwc->usb_psy,
3247 POWER_SUPPLY_PROP_PRESENT, &pval);
3248 }
3249
Mayank Rana511f3b22016-08-02 12:00:11 -07003250 /* Update initial VBUS/ID state from extcon */
3251 if (mdwc->extcon_vbus && extcon_get_cable_state_(mdwc->extcon_vbus,
3252 EXTCON_USB))
3253 dwc3_msm_vbus_notifier(&mdwc->vbus_nb, true, mdwc->extcon_vbus);
Hemant Kumar8220a982017-01-19 18:11:34 -08003254 else if (mdwc->extcon_id && extcon_get_cable_state_(mdwc->extcon_id,
Mayank Rana511f3b22016-08-02 12:00:11 -07003255 EXTCON_USB_HOST))
3256 dwc3_msm_id_notifier(&mdwc->id_nb, true, mdwc->extcon_id);
Hemant Kumar8220a982017-01-19 18:11:34 -08003257 else if (!pval.intval) {
3258 /* USB cable is not connected */
3259 schedule_delayed_work(&mdwc->sm_work, 0);
3260 } else {
3261 if (pval.intval > 0)
3262 dev_info(mdwc->dev, "charger detection in progress\n");
3263 }
Mayank Rana511f3b22016-08-02 12:00:11 -07003264
3265 device_create_file(&pdev->dev, &dev_attr_mode);
Vamsi Krishna Samavedam17f26db2017-01-31 17:21:23 -08003266 device_create_file(&pdev->dev, &dev_attr_speed);
Mayank Rana511f3b22016-08-02 12:00:11 -07003267
Mayank Rana511f3b22016-08-02 12:00:11 -07003268 host_mode = usb_get_dr_mode(&mdwc->dwc3->dev) == USB_DR_MODE_HOST;
3269 if (!dwc->is_drd && host_mode) {
3270 dev_dbg(&pdev->dev, "DWC3 in host only mode\n");
3271 mdwc->id_state = DWC3_ID_GROUND;
3272 dwc3_ext_event_notify(mdwc);
3273 }
3274
3275 return 0;
3276
3277put_dwc3:
3278 platform_device_put(mdwc->dwc3);
3279 if (mdwc->bus_perf_client)
3280 msm_bus_scale_unregister_client(mdwc->bus_perf_client);
Jack Phambbe27962017-03-23 18:42:26 -07003281uninit_iommu:
3282 if (mdwc->iommu_map)
3283 arm_iommu_release_mapping(mdwc->iommu_map);
Mayank Rana511f3b22016-08-02 12:00:11 -07003284err:
3285 return ret;
3286}
3287
/* device_for_each_child() callback: unregister one child device */
static int dwc3_msm_remove_children(struct device *dev, void *unused)
{
	device_unregister(dev);
	return 0;
}
3293
3294static int dwc3_msm_remove(struct platform_device *pdev)
3295{
3296 struct dwc3_msm *mdwc = platform_get_drvdata(pdev);
Mayank Rana08e41922017-03-02 15:25:48 -08003297 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07003298 int ret_pm;
3299
3300 device_remove_file(&pdev->dev, &dev_attr_mode);
3301
3302 if (cpu_to_affin)
3303 unregister_cpu_notifier(&mdwc->dwc3_cpu_notifier);
3304
3305 /*
3306 * In case of system suspend, pm_runtime_get_sync fails.
3307 * Hence turn ON the clocks manually.
3308 */
3309 ret_pm = pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003310 dbg_event(0xFF, "Remov gsyn", ret_pm);
Mayank Rana511f3b22016-08-02 12:00:11 -07003311 if (ret_pm < 0) {
3312 dev_err(mdwc->dev,
3313 "pm_runtime_get_sync failed with %d\n", ret_pm);
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05303314 if (mdwc->noc_aggr_clk)
3315 clk_prepare_enable(mdwc->noc_aggr_clk);
Mayank Rana511f3b22016-08-02 12:00:11 -07003316 clk_prepare_enable(mdwc->utmi_clk);
3317 clk_prepare_enable(mdwc->core_clk);
3318 clk_prepare_enable(mdwc->iface_clk);
3319 clk_prepare_enable(mdwc->sleep_clk);
3320 if (mdwc->bus_aggr_clk)
3321 clk_prepare_enable(mdwc->bus_aggr_clk);
3322 clk_prepare_enable(mdwc->xo_clk);
3323 }
3324
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303325 cancel_delayed_work_sync(&mdwc->perf_vote_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07003326 cancel_delayed_work_sync(&mdwc->sm_work);
3327
3328 if (mdwc->hs_phy)
3329 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3330 platform_device_put(mdwc->dwc3);
3331 device_for_each_child(&pdev->dev, NULL, dwc3_msm_remove_children);
3332
Mayank Rana08e41922017-03-02 15:25:48 -08003333 dbg_event(0xFF, "Remov put", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003334 pm_runtime_disable(mdwc->dev);
3335 pm_runtime_barrier(mdwc->dev);
3336 pm_runtime_put_sync(mdwc->dev);
3337 pm_runtime_set_suspended(mdwc->dev);
3338 device_wakeup_disable(mdwc->dev);
3339
3340 if (mdwc->bus_perf_client)
3341 msm_bus_scale_unregister_client(mdwc->bus_perf_client);
3342
3343 if (!IS_ERR_OR_NULL(mdwc->vbus_reg))
3344 regulator_disable(mdwc->vbus_reg);
3345
3346 disable_irq(mdwc->hs_phy_irq);
3347 if (mdwc->ss_phy_irq)
3348 disable_irq(mdwc->ss_phy_irq);
3349 disable_irq(mdwc->pwr_event_irq);
3350
3351 clk_disable_unprepare(mdwc->utmi_clk);
3352 clk_set_rate(mdwc->core_clk, 19200000);
3353 clk_disable_unprepare(mdwc->core_clk);
3354 clk_disable_unprepare(mdwc->iface_clk);
3355 clk_disable_unprepare(mdwc->sleep_clk);
3356 clk_disable_unprepare(mdwc->xo_clk);
3357 clk_put(mdwc->xo_clk);
3358
3359 dwc3_msm_config_gdsc(mdwc, 0);
3360
Jack Phambbe27962017-03-23 18:42:26 -07003361 if (mdwc->iommu_map) {
3362 if (!atomic_read(&dwc->in_lpm))
3363 arm_iommu_detach_device(mdwc->dev);
3364 arm_iommu_release_mapping(mdwc->iommu_map);
3365 }
3366
Mayank Rana511f3b22016-08-02 12:00:11 -07003367 return 0;
3368}
3369
/*
 * USB core notifier (host mode): on add/remove of a device directly
 * attached to the root hub, scale the core clock for HS-only operation
 * and report the device's configured bMaxPower to the PMIC boost supply.
 */
static int dwc3_msm_host_notifier(struct notifier_block *nb,
		unsigned long event, void *ptr)
{
	struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, host_nb);
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	struct usb_device *udev = ptr;
	union power_supply_propval pval;
	unsigned int max_power;

	if (event != USB_DEVICE_ADD && event != USB_DEVICE_REMOVE)
		return NOTIFY_DONE;

	/* "usb" power supply may not have existed at probe time; retry */
	if (!mdwc->usb_psy) {
		mdwc->usb_psy = power_supply_get_by_name("usb");
		if (!mdwc->usb_psy)
			return NOTIFY_DONE;
	}

	/*
	 * For direct-attach devices, new udev is direct child of root hub
	 * i.e. dwc -> xhci -> root_hub -> udev
	 * root_hub's udev->parent==NULL, so traverse struct device hierarchy
	 */
	if (udev->parent && !udev->parent->parent &&
			udev->dev.parent->parent == &dwc->xhci->dev) {
		if (event == USB_DEVICE_ADD && udev->actconfig) {
			if (!dwc3_msm_is_ss_rhport_connected(mdwc)) {
				/*
				 * Core clock rate can be reduced only if root
				 * hub SS port is not enabled/connected.
				 */
				clk_set_rate(mdwc->core_clk,
				mdwc->core_clk_rate_hs);
				dev_dbg(mdwc->dev,
					"set hs core clk rate %ld\n",
					mdwc->core_clk_rate_hs);
				mdwc->max_rh_port_speed = USB_SPEED_HIGH;
			} else {
				mdwc->max_rh_port_speed = USB_SPEED_SUPER;
			}

			/* bMaxPower units: 8 mA for SS+, 2 mA below SS */
			if (udev->speed >= USB_SPEED_SUPER)
				max_power = udev->actconfig->desc.bMaxPower * 8;
			else
				max_power = udev->actconfig->desc.bMaxPower * 2;
			dev_dbg(mdwc->dev, "%s configured bMaxPower:%d (mA)\n",
					dev_name(&udev->dev), max_power);

			/* inform PMIC of max power so it can optimize boost */
			pval.intval = max_power * 1000;
			power_supply_set_property(mdwc->usb_psy,
					POWER_SUPPLY_PROP_BOOST_CURRENT, &pval);
		} else {
			/* removal (or add without config): zero boost vote */
			pval.intval = 0;
			power_supply_set_property(mdwc->usb_psy,
					POWER_SUPPLY_PROP_BOOST_CURRENT, &pval);

			/* set rate back to default core clk rate */
			clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
			dev_dbg(mdwc->dev, "set core clk rate %ld\n",
				mdwc->core_clk_rate);
			mdwc->max_rh_port_speed = USB_SPEED_UNKNOWN;
		}
	}

	return NOTIFY_DONE;
}
3437
/*
 * Toggle the CPU DMA latency PM QoS vote between the DT-supplied latency
 * (perf mode) and PM_QOS_DEFAULT_VALUE.  No-op if the requested mode is
 * already active or no qcom,pm-qos-latency was configured.
 *
 * NOTE(review): curr_perf_mode is a function-local static, so the cached
 * state is shared across all dwc3_msm instances -- assumes a single
 * controller per system; confirm.
 */
static void msm_dwc3_perf_vote_update(struct dwc3_msm *mdwc, bool perf_mode)
{
	static bool curr_perf_mode;
	int latency = mdwc->pm_qos_latency;

	if ((curr_perf_mode == perf_mode) || !latency)
		return;

	if (perf_mode)
		pm_qos_update_request(&mdwc->pm_qos_req_dma, latency);
	else
		pm_qos_update_request(&mdwc->pm_qos_req_dma,
						PM_QOS_DEFAULT_VALUE);

	curr_perf_mode = perf_mode;
	pr_debug("%s: latency updated to: %d\n", __func__,
			perf_mode ? latency : PM_QOS_DEFAULT_VALUE);
}
3456
/*
 * Periodic (PM_QOS_SAMPLE_SEC) worker: samples the controller interrupt
 * rate and enters perf mode when interrupts since the last sample exceed
 * PM_QOS_THRESHOLD.  Re-arms itself until cancelled.
 *
 * NOTE(review): last_irq_cnt is a function-local static -- same
 * single-instance assumption as msm_dwc3_perf_vote_update(); confirm.
 */
static void msm_dwc3_perf_vote_work(struct work_struct *w)
{
	struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
							perf_vote_work.work);
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	static unsigned long last_irq_cnt;
	bool in_perf_mode = false;

	if (dwc->irq_cnt - last_irq_cnt >= PM_QOS_THRESHOLD)
		in_perf_mode = true;

	pr_debug("%s: in_perf_mode:%u, interrupts in last sample:%lu\n",
		 __func__, in_perf_mode, (dwc->irq_cnt - last_irq_cnt));

	last_irq_cnt = dwc->irq_cnt;
	msm_dwc3_perf_vote_update(mdwc, in_perf_mode);
	schedule_delayed_work(&mdwc->perf_vote_work,
			msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
}
3476
#define VBUS_REG_CHECK_DELAY	(msecs_to_jiffies(1000))

/**
 * dwc3_otg_start_host -  helper function for starting/stopping the host
 * controller driver.
 *
 * @mdwc: Pointer to the dwc3_msm structure.
 * @on: start / stop the host controller driver.
 *
 * Returns 0 on success otherwise negative errno.
 */
static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
{
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	int ret = 0;

	if (!dwc->xhci)
		return -EINVAL;

	/*
	 * The vbus_reg pointer could have multiple values
	 * NULL: regulator_get() hasn't been called, or was previously deferred
	 * IS_ERR: regulator could not be obtained, so skip using it
	 * Valid pointer otherwise
	 */
	if (!mdwc->vbus_reg) {
		mdwc->vbus_reg = devm_regulator_get_optional(mdwc->dev,
					"vbus_dwc3");
		if (IS_ERR(mdwc->vbus_reg) &&
				PTR_ERR(mdwc->vbus_reg) == -EPROBE_DEFER) {
			/* regulators may not be ready, so retry again later */
			mdwc->vbus_reg = NULL;
			return -EPROBE_DEFER;
		}
	}

	if (on) {
		dev_dbg(mdwc->dev, "%s: turn on host\n", __func__);

		mdwc->hs_phy->flags |= PHY_HOST_MODE;
		if (dwc->maximum_speed == USB_SPEED_SUPER)
			mdwc->ss_phy->flags |= PHY_HOST_MODE;

		/* hold the device awake while host mode is active */
		pm_runtime_get_sync(mdwc->dev);
		dbg_event(0xFF, "StrtHost gync",
			atomic_read(&mdwc->dev->power.usage_count));
		usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
		if (!IS_ERR(mdwc->vbus_reg))
			ret = regulator_enable(mdwc->vbus_reg);
		if (ret) {
			dev_err(mdwc->dev, "unable to enable vbus_reg\n");
			mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
			mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
			pm_runtime_put_sync(mdwc->dev);
			dbg_event(0xFF, "vregerr psync",
				atomic_read(&mdwc->dev->power.usage_count));
			return ret;
		}

		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);

		/* watch root-hub attach/detach to scale clk and boost vote */
		mdwc->host_nb.notifier_call = dwc3_msm_host_notifier;
		usb_register_notify(&mdwc->host_nb);

		mdwc->usbdev_nb.notifier_call = msm_dwc3_usbdev_notify;
		usb_register_atomic_notify(&mdwc->usbdev_nb);
		/*
		 * FIXME If micro A cable is disconnected during system suspend,
		 * xhci platform device will be removed before runtime pm is
		 * enabled for xhci device. Due to this, disable_depth becomes
		 * greater than one and runtimepm is not enabled for next microA
		 * connect. Fix this by calling pm_runtime_init for xhci device.
		 */
		pm_runtime_init(&dwc->xhci->dev);
		ret = platform_device_add(dwc->xhci);
		if (ret) {
			dev_err(mdwc->dev,
				"%s: failed to add XHCI pdev ret=%d\n",
				__func__, ret);
			/* unwind everything done above, in reverse order */
			if (!IS_ERR(mdwc->vbus_reg))
				regulator_disable(mdwc->vbus_reg);
			mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
			mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
			pm_runtime_put_sync(mdwc->dev);
			dbg_event(0xFF, "pdeverr psync",
				atomic_read(&mdwc->dev->power.usage_count));
			usb_unregister_notify(&mdwc->host_nb);
			return ret;
		}

		/*
		 * In some cases it is observed that USB PHY is not going into
		 * suspend with host mode suspend functionality. Hence disable
		 * XHCI's runtime PM here if disable_host_mode_pm is set.
		 */
		if (mdwc->disable_host_mode_pm)
			pm_runtime_disable(&dwc->xhci->dev);

		mdwc->in_host_mode = true;
		dwc3_usb3_phy_suspend(dwc, true);

		/* xHCI should have incremented child count as necessary */
		dbg_event(0xFF, "StrtHost psync",
			atomic_read(&mdwc->dev->power.usage_count));
		pm_runtime_mark_last_busy(mdwc->dev);
		pm_runtime_put_sync_autosuspend(mdwc->dev);
#ifdef CONFIG_SMP
		mdwc->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
		mdwc->pm_qos_req_dma.irq = dwc->irq;
#endif
		pm_qos_add_request(&mdwc->pm_qos_req_dma,
				PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
		/* start in perf mode for better performance initially */
		msm_dwc3_perf_vote_update(mdwc, true);
		schedule_delayed_work(&mdwc->perf_vote_work,
				msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
	} else {
		dev_dbg(mdwc->dev, "%s: turn off host\n", __func__);

		usb_unregister_atomic_notify(&mdwc->usbdev_nb);
		if (!IS_ERR(mdwc->vbus_reg))
			ret = regulator_disable(mdwc->vbus_reg);
		if (ret) {
			dev_err(mdwc->dev, "unable to disable vbus_reg\n");
			return ret;
		}

		cancel_delayed_work_sync(&mdwc->perf_vote_work);
		msm_dwc3_perf_vote_update(mdwc, false);
		pm_qos_remove_request(&mdwc->pm_qos_req_dma);

		pm_runtime_get_sync(mdwc->dev);
		dbg_event(0xFF, "StopHost gsync",
			atomic_read(&mdwc->dev->power.usage_count));
		usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
		mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
		mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
		platform_device_del(dwc->xhci);
		usb_unregister_notify(&mdwc->host_nb);

		/*
		 * Perform USB hardware RESET (both core reset and DBM reset)
		 * when moving from host to peripheral. This is required for
		 * peripheral mode to work.
		 */
		dwc3_msm_block_reset(mdwc, true);

		dwc3_usb3_phy_suspend(dwc, false);
		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);

		mdwc->in_host_mode = false;

		/* re-init core and OTG registers as block reset clears these */
		dwc3_post_host_reset_core_init(dwc);
		pm_runtime_mark_last_busy(mdwc->dev);
		pm_runtime_put_sync_autosuspend(mdwc->dev);
		dbg_event(0xFF, "StopHost psync",
			atomic_read(&mdwc->dev->power.usage_count));
	}

	return 0;
}
3639
/*
 * Override the PHY session-valid inputs seen by the controller so that a
 * software-detected VBUS state is reflected in the HS (and, when SS is
 * supported, SS) PHY control registers.
 */
static void dwc3_override_vbus_status(struct dwc3_msm *mdwc, bool vbus_present)
{
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);

	/* Update OTG VBUS Valid from HSPHY to controller */
	dwc3_msm_write_readback(mdwc->base, HS_PHY_CTRL_REG,
		vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL :
		UTMI_OTG_VBUS_VALID,
		vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL : 0);

	/* Update only if Super Speed is supported */
	if (dwc->maximum_speed == USB_SPEED_SUPER) {
		/* Update VBUS Valid from SSPHY to controller */
		dwc3_msm_write_readback(mdwc->base, SS_PHY_CTRL_REG,
			LANE0_PWR_PRESENT,
			vbus_present ? LANE0_PWR_PRESENT : 0);
	}
}
3658
/**
 * dwc3_otg_start_peripheral - bind/unbind the peripheral controller.
 *
 * @mdwc: Pointer to the dwc3_msm structure.
 * @on: Turn ON/OFF the gadget.
 *
 * Holds a pm_runtime reference across the whole sequence so the core
 * stays out of LPM while it is being reconfigured.  The per-statement
 * order below is a hardware bring-up/teardown sequence — do not reorder.
 *
 * Returns 0 on success otherwise negative errno.
 */
static int dwc3_otg_start_peripheral(struct dwc3_msm *mdwc, int on)
{
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);

	/* keep the controller resumed for the duration of the switch */
	pm_runtime_get_sync(mdwc->dev);
	dbg_event(0xFF, "StrtGdgt gsync",
		atomic_read(&mdwc->dev->power.usage_count));

	if (on) {
		dev_dbg(mdwc->dev, "%s: turn on gadget %s\n",
					__func__, dwc->gadget.name);

		/* report session-valid to the core before notifying PHYs */
		dwc3_override_vbus_status(mdwc, true);
		usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
		usb_phy_notify_connect(mdwc->ss_phy, USB_SPEED_SUPER);

		/*
		 * Core reset is not required during start peripheral. Only
		 * DBM reset is required, hence perform only DBM reset here.
		 */
		dwc3_msm_block_reset(mdwc, false);

		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
		usb_gadget_vbus_connect(&dwc->gadget);
#ifdef CONFIG_SMP
		/* affine the DMA-latency QoS request to the core's IRQ CPU */
		mdwc->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
		mdwc->pm_qos_req_dma.irq = dwc->irq;
#endif
		pm_qos_add_request(&mdwc->pm_qos_req_dma,
				PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
		/* start in perf mode for better performance initially */
		msm_dwc3_perf_vote_update(mdwc, true);
		schedule_delayed_work(&mdwc->perf_vote_work,
				msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
	} else {
		dev_dbg(mdwc->dev, "%s: turn off gadget %s\n",
					__func__, dwc->gadget.name);
		/* stop perf voting before dropping the QoS request */
		cancel_delayed_work_sync(&mdwc->perf_vote_work);
		msm_dwc3_perf_vote_update(mdwc, false);
		pm_qos_remove_request(&mdwc->pm_qos_req_dma);

		/* teardown mirrors the bring-up sequence in reverse */
		usb_gadget_vbus_disconnect(&dwc->gadget);
		usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
		usb_phy_notify_disconnect(mdwc->ss_phy, USB_SPEED_SUPER);
		dwc3_override_vbus_status(mdwc, false);
		dwc3_usb3_phy_suspend(dwc, false);
	}

	pm_runtime_put_sync(mdwc->dev);
	dbg_event(0xFF, "StopGdgt psync",
		atomic_read(&mdwc->dev->power.usage_count));

	return 0;
}
3721
3722static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA)
3723{
Jack Pham8caff352016-08-19 16:33:55 -07003724 union power_supply_propval pval = {0};
Jack Phamd72bafe2016-08-09 11:07:22 -07003725 int ret;
Mayank Rana511f3b22016-08-02 12:00:11 -07003726
3727 if (mdwc->charging_disabled)
3728 return 0;
3729
3730 if (mdwc->max_power == mA)
3731 return 0;
3732
3733 if (!mdwc->usb_psy) {
3734 mdwc->usb_psy = power_supply_get_by_name("usb");
3735 if (!mdwc->usb_psy) {
3736 dev_warn(mdwc->dev, "Could not get usb power_supply\n");
3737 return -ENODEV;
3738 }
3739 }
3740
Jack Pham8caff352016-08-19 16:33:55 -07003741 power_supply_get_property(mdwc->usb_psy, POWER_SUPPLY_PROP_TYPE, &pval);
3742 if (pval.intval != POWER_SUPPLY_TYPE_USB)
3743 return 0;
3744
Mayank Rana511f3b22016-08-02 12:00:11 -07003745 dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA);
3746
Mayank Rana511f3b22016-08-02 12:00:11 -07003747 /* Set max current limit in uA */
Jack Pham8caff352016-08-19 16:33:55 -07003748 pval.intval = 1000 * mA;
Jack Phamd72bafe2016-08-09 11:07:22 -07003749 ret = power_supply_set_property(mdwc->usb_psy,
3750 POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
3751 if (ret) {
3752 dev_dbg(mdwc->dev, "power supply error when setting property\n");
3753 return ret;
3754 }
Mayank Rana511f3b22016-08-02 12:00:11 -07003755
3756 mdwc->max_power = mA;
3757 return 0;
Mayank Rana511f3b22016-08-02 12:00:11 -07003758}
3759
3760
/**
 * dwc3_otg_sm_work - OTG state machine workqueue function.
 *
 * @w: Pointer to the dwc3 otg workqueue
 *
 * Drives role transitions based on the ID, B_SESS_VLD and B_SUSPEND bits
 * in mdwc->inputs: ID clear selects host (A) states, B_SESS_VLD selects
 * the peripheral (B) states.  The pm_runtime usage count is incremented
 * on cable connect / host-initiated resume and decremented on disconnect
 * / bus suspend; each get/put is paired across states as commented below.
 *
 * NOTE: After any change in otg_state, we must reschedule the state machine.
 */
static void dwc3_otg_sm_work(struct work_struct *w)
{
	struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, sm_work.work);
	struct dwc3 *dwc = NULL;
	bool work = 0;
	int ret = 0;
	unsigned long delay = 0;
	const char *state;

	if (mdwc->dwc3)
		dwc = platform_get_drvdata(mdwc->dwc3);

	/* core device may not have probed yet; bail out until it has */
	if (!dwc) {
		dev_err(mdwc->dev, "dwc is NULL.\n");
		return;
	}

	state = usb_otg_state_string(mdwc->otg_state);
	dev_dbg(mdwc->dev, "%s state\n", state);
	dbg_event(0xFF, state, 0);

	/* Check OTG state */
	switch (mdwc->otg_state) {
	case OTG_STATE_UNDEFINED:
		/* put controller and phy in suspend if no cable connected */
		if (test_bit(ID, &mdwc->inputs) &&
				!test_bit(B_SESS_VLD, &mdwc->inputs)) {
			dbg_event(0xFF, "undef_id_!bsv", 0);
			/*
			 * Resume once so dwc3_msm_resume() runs, then drop
			 * the reference so runtime PM can suspend the core.
			 */
			pm_runtime_set_active(mdwc->dev);
			pm_runtime_enable(mdwc->dev);
			pm_runtime_get_noresume(mdwc->dev);
			dwc3_msm_resume(mdwc);
			pm_runtime_put_sync(mdwc->dev);
			dbg_event(0xFF, "Undef NoUSB",
				atomic_read(&mdwc->dev->power.usage_count));
			mdwc->otg_state = OTG_STATE_B_IDLE;
			break;
		}

		/* cable already present at boot: enter B_IDLE directly */
		dbg_event(0xFF, "Exit UNDEF", 0);
		mdwc->otg_state = OTG_STATE_B_IDLE;
		pm_runtime_set_suspended(mdwc->dev);
		pm_runtime_enable(mdwc->dev);
		/* fall-through */
	case OTG_STATE_B_IDLE:
		if (!test_bit(ID, &mdwc->inputs)) {
			dev_dbg(mdwc->dev, "!id\n");
			mdwc->otg_state = OTG_STATE_A_IDLE;
			work = 1;
		} else if (test_bit(B_SESS_VLD, &mdwc->inputs)) {
			dev_dbg(mdwc->dev, "b_sess_vld\n");
			/*
			 * Increment pm usage count upon cable connect. Count
			 * is decremented in OTG_STATE_B_PERIPHERAL state on
			 * cable disconnect or in bus suspend.
			 */
			pm_runtime_get_sync(mdwc->dev);
			dbg_event(0xFF, "BIDLE gsync",
				atomic_read(&mdwc->dev->power.usage_count));
			dwc3_otg_start_peripheral(mdwc, 1);
			mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
			work = 1;
		} else {
			/* no host, no session: zero the charger current draw */
			dwc3_msm_gadget_vbus_draw(mdwc, 0);
			dev_dbg(mdwc->dev, "Cable disconnected\n");
		}
		break;

	case OTG_STATE_B_PERIPHERAL:
		if (!test_bit(B_SESS_VLD, &mdwc->inputs) ||
				!test_bit(ID, &mdwc->inputs)) {
			dev_dbg(mdwc->dev, "!id || !bsv\n");
			mdwc->otg_state = OTG_STATE_B_IDLE;
			dwc3_otg_start_peripheral(mdwc, 0);
			/*
			 * Decrement pm usage count upon cable disconnect
			 * which was incremented upon cable connect in
			 * OTG_STATE_B_IDLE state
			 */
			pm_runtime_put_sync(mdwc->dev);
			dbg_event(0xFF, "!BSV psync",
				atomic_read(&mdwc->dev->power.usage_count));
			work = 1;
		} else if (test_bit(B_SUSPEND, &mdwc->inputs) &&
			test_bit(B_SESS_VLD, &mdwc->inputs)) {
			dev_dbg(mdwc->dev, "BPER bsv && susp\n");
			mdwc->otg_state = OTG_STATE_B_SUSPEND;
			/*
			 * Decrement pm usage count upon bus suspend.
			 * Count was incremented either upon cable
			 * connect in OTG_STATE_B_IDLE or host
			 * initiated resume after bus suspend in
			 * OTG_STATE_B_SUSPEND state
			 */
			pm_runtime_mark_last_busy(mdwc->dev);
			pm_runtime_put_autosuspend(mdwc->dev);
			dbg_event(0xFF, "SUSP put",
				atomic_read(&mdwc->dev->power.usage_count));
		}
		break;

	case OTG_STATE_B_SUSPEND:
		if (!test_bit(B_SESS_VLD, &mdwc->inputs)) {
			dev_dbg(mdwc->dev, "BSUSP: !bsv\n");
			mdwc->otg_state = OTG_STATE_B_IDLE;
			dwc3_otg_start_peripheral(mdwc, 0);
		} else if (!test_bit(B_SUSPEND, &mdwc->inputs)) {
			dev_dbg(mdwc->dev, "BSUSP !susp\n");
			mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
			/*
			 * Increment pm usage count upon host
			 * initiated resume. Count was decremented
			 * upon bus suspend in
			 * OTG_STATE_B_PERIPHERAL state.
			 */
			pm_runtime_get_sync(mdwc->dev);
			dbg_event(0xFF, "!SUSP gsync",
				atomic_read(&mdwc->dev->power.usage_count));
		}
		break;

	case OTG_STATE_A_IDLE:
		/* Switch to A-Device*/
		if (test_bit(ID, &mdwc->inputs)) {
			dev_dbg(mdwc->dev, "id\n");
			mdwc->otg_state = OTG_STATE_B_IDLE;
			mdwc->vbus_retry_count = 0;
			work = 1;
		} else {
			mdwc->otg_state = OTG_STATE_A_HOST;
			ret = dwc3_otg_start_host(mdwc, 1);
			if ((ret == -EPROBE_DEFER) &&
					mdwc->vbus_retry_count < 3) {
				/*
				 * Get regulator failed as regulator driver is
				 * not up yet. Will try to start host after 1sec
				 */
				mdwc->otg_state = OTG_STATE_A_IDLE;
				dev_dbg(mdwc->dev, "Unable to get vbus regulator. Retrying...\n");
				delay = VBUS_REG_CHECK_DELAY;
				work = 1;
				mdwc->vbus_retry_count++;
			} else if (ret) {
				dev_err(mdwc->dev, "unable to start host\n");
				mdwc->otg_state = OTG_STATE_A_IDLE;
				/* skip rescheduling; wait for next event */
				goto ret;
			}
		}
		break;

	case OTG_STATE_A_HOST:
		if (test_bit(ID, &mdwc->inputs) || mdwc->hc_died) {
			dev_dbg(mdwc->dev, "id || hc_died\n");
			dwc3_otg_start_host(mdwc, 0);
			mdwc->otg_state = OTG_STATE_B_IDLE;
			mdwc->vbus_retry_count = 0;
			mdwc->hc_died = false;
			work = 1;
		} else {
			dev_dbg(mdwc->dev, "still in a_host state. Resuming root hub.\n");
			dbg_event(0xFF, "XHCIResume", 0);
			if (dwc)
				pm_runtime_resume(&dwc->xhci->dev);
		}
		break;

	default:
		dev_err(mdwc->dev, "%s: invalid otg-state\n", __func__);

	}

	if (work)
		schedule_delayed_work(&mdwc->sm_work, delay);

ret:
	return;
}
3945
3946#ifdef CONFIG_PM_SLEEP
3947static int dwc3_msm_pm_suspend(struct device *dev)
3948{
3949 int ret = 0;
3950 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3951 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3952
3953 dev_dbg(dev, "dwc3-msm PM suspend\n");
Mayank Rana08e41922017-03-02 15:25:48 -08003954 dbg_event(0xFF, "PM Sus", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003955
3956 flush_workqueue(mdwc->dwc3_wq);
3957 if (!atomic_read(&dwc->in_lpm)) {
3958 dev_err(mdwc->dev, "Abort PM suspend!! (USB is outside LPM)\n");
3959 return -EBUSY;
3960 }
3961
3962 ret = dwc3_msm_suspend(mdwc);
3963 if (!ret)
3964 atomic_set(&mdwc->pm_suspended, 1);
3965
3966 return ret;
3967}
3968
/*
 * System-sleep resume callback: clears the pm_suspended flag and kicks
 * the resume work to re-run the OTG state machine.
 * NOTE(review): `dwc` looks unused here, but the dbg_event() macro
 * presumably expands to reference it — confirm before removing.
 */
static int dwc3_msm_pm_resume(struct device *dev)
{
	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);

	dev_dbg(dev, "dwc3-msm PM resume\n");
	dbg_event(0xFF, "PM Res", 0);

	/* flush to avoid race in read/write of pm_suspended */
	flush_workqueue(mdwc->dwc3_wq);
	atomic_set(&mdwc->pm_suspended, 0);

	/* kick in otg state machine */
	queue_work(mdwc->dwc3_wq, &mdwc->resume_work);

	return 0;
}
3986#endif
3987
3988#ifdef CONFIG_PM
/*
 * Runtime-PM idle callback: returning 0 lets the PM core proceed with
 * runtime suspend.
 * NOTE(review): `dwc` looks unused here, but the dbg_event() macro
 * presumably expands to reference it — confirm before removing.
 */
static int dwc3_msm_runtime_idle(struct device *dev)
{
	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);

	dev_dbg(dev, "DWC3-msm runtime idle\n");
	dbg_event(0xFF, "RT Idle", 0);

	return 0;
}
3999
/*
 * Runtime-PM suspend callback: delegates to dwc3_msm_suspend().
 * NOTE(review): `dwc` looks unused here, but the dbg_event() macro
 * presumably expands to reference it — confirm before removing.
 */
static int dwc3_msm_runtime_suspend(struct device *dev)
{
	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);

	dev_dbg(dev, "DWC3-msm runtime suspend\n");
	dbg_event(0xFF, "RT Sus", 0);

	return dwc3_msm_suspend(mdwc);
}
4010
/*
 * Runtime-PM resume callback: delegates to dwc3_msm_resume().
 * NOTE(review): `dwc` looks unused here, but the dbg_event() macro
 * presumably expands to reference it — confirm before removing.
 */
static int dwc3_msm_runtime_resume(struct device *dev)
{
	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);

	dev_dbg(dev, "DWC3-msm runtime resume\n");
	dbg_event(0xFF, "RT Res", 0);

	return dwc3_msm_resume(mdwc);
}
4021#endif
4022
/* Device PM ops: system sleep and runtime PM hooks defined above. */
static const struct dev_pm_ops dwc3_msm_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dwc3_msm_pm_suspend, dwc3_msm_pm_resume)
	SET_RUNTIME_PM_OPS(dwc3_msm_runtime_suspend, dwc3_msm_runtime_resume,
				dwc3_msm_runtime_idle)
};
4028
4029static const struct of_device_id of_dwc3_matach[] = {
4030 {
4031 .compatible = "qcom,dwc-usb3-msm",
4032 },
4033 { },
4034};
4035MODULE_DEVICE_TABLE(of, of_dwc3_matach);
4036
4037static struct platform_driver dwc3_msm_driver = {
4038 .probe = dwc3_msm_probe,
4039 .remove = dwc3_msm_remove,
4040 .driver = {
4041 .name = "msm-dwc3",
4042 .pm = &dwc3_msm_dev_pm_ops,
4043 .of_match_table = of_dwc3_matach,
4044 },
4045};
4046
4047MODULE_LICENSE("GPL v2");
4048MODULE_DESCRIPTION("DesignWare USB3 MSM Glue Layer");
4049
4050static int dwc3_msm_init(void)
4051{
4052 return platform_driver_register(&dwc3_msm_driver);
4053}
4054module_init(dwc3_msm_init);
4055
/* Module exit point: unregister the platform driver. */
static void __exit dwc3_msm_exit(void)
{
	platform_driver_unregister(&dwc3_msm_driver);
}
module_exit(dwc3_msm_exit);